Linux v4.19.13 source snapshot (arch/powerpc/include/asm headers).
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
new file mode 100644
index 0000000..bdf0563
--- /dev/null
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -0,0 +1,564 @@
+/*
+ * MPC8xx Internal Memory Map
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * The I/O on the MPC860 comprises blocks of special registers
+ * and the dual port ram for the Communication Processor Module.
+ * Within this space are functional units such as the SIU, memory
+ * controller, system timers, and other control functions.  It is
+ * a combination that I found difficult to separate into logical
+ * functional files.....but anyone else is welcome to try.  -- Dan
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_8XX__
+#define __IMMAP_8XX__
+
+/* System configuration registers.
+*/
+typedef	struct sys_conf {
+	uint	sc_siumcr;
+	uint	sc_sypcr;
+	uint	sc_swt;
+	char	res1[2];
+	ushort	sc_swsr;
+	uint	sc_sipend;
+	uint	sc_simask;
+	uint	sc_siel;
+	uint	sc_sivec;
+	uint	sc_tesr;
+	char	res2[0xc];
+	uint	sc_sdcr;
+	char	res3[0x4c];
+} sysconf8xx_t;
+
+/* PCMCIA configuration registers.
+*/
+typedef struct pcmcia_conf {
+	uint	pcmc_pbr0;
+	uint	pcmc_por0;
+	uint	pcmc_pbr1;
+	uint	pcmc_por1;
+	uint	pcmc_pbr2;
+	uint	pcmc_por2;
+	uint	pcmc_pbr3;
+	uint	pcmc_por3;
+	uint	pcmc_pbr4;
+	uint	pcmc_por4;
+	uint	pcmc_pbr5;
+	uint	pcmc_por5;
+	uint	pcmc_pbr6;
+	uint	pcmc_por6;
+	uint	pcmc_pbr7;
+	uint	pcmc_por7;
+	char	res1[0x20];
+	uint	pcmc_pgcra;
+	uint	pcmc_pgcrb;
+	uint	pcmc_pscr;
+	char	res2[4];
+	uint	pcmc_pipr;
+	char	res3[4];
+	uint	pcmc_per;
+	char	res4[4];
+} pcmconf8xx_t;
+
+/* Memory controller registers.
+*/
+typedef struct	mem_ctlr {
+	uint	memc_br0;
+	uint	memc_or0;
+	uint	memc_br1;
+	uint	memc_or1;
+	uint	memc_br2;
+	uint	memc_or2;
+	uint	memc_br3;
+	uint	memc_or3;
+	uint	memc_br4;
+	uint	memc_or4;
+	uint	memc_br5;
+	uint	memc_or5;
+	uint	memc_br6;
+	uint	memc_or6;
+	uint	memc_br7;
+	uint	memc_or7;
+	char	res1[0x24];
+	uint	memc_mar;
+	uint	memc_mcr;
+	char	res2[4];
+	uint	memc_mamr;
+	uint	memc_mbmr;
+	ushort	memc_mstat;
+	ushort	memc_mptpr;
+	uint	memc_mdr;
+	char	res3[0x80];
+} memctl8xx_t;
+
+/*-----------------------------------------------------------------------
+ * BR - Memory Controller: Base Register					16-9
+ */
+#define BR_BA_MSK	0xffff8000	/* Base Address Mask			*/
+#define BR_AT_MSK	0x00007000	/* Address Type Mask			*/
+#define BR_PS_MSK	0x00000c00	/* Port Size Mask			*/
+#define BR_PS_32	0x00000000	/* 32 bit port size			*/
+#define BR_PS_16	0x00000800	/* 16 bit port size			*/
+#define BR_PS_8		0x00000400	/*  8 bit port size			*/
+#define BR_PARE		0x00000200	/* Parity Enable			*/
+#define BR_WP		0x00000100	/* Write Protect			*/
+#define BR_MS_MSK	0x000000c0	/* Machine Select Mask			*/
+#define BR_MS_GPCM	0x00000000	/* G.P.C.M. Machine Select		*/
+#define BR_MS_UPMA	0x00000080	/* U.P.M.A Machine Select		*/
+#define BR_MS_UPMB	0x000000c0	/* U.P.M.B Machine Select		*/
+#define BR_V		0x00000001	/* Bank Valid				*/
+
+/*-----------------------------------------------------------------------
+ * OR - Memory Controller: Option Register				16-11
+ */
+#define OR_AM_MSK	0xffff8000	/* Address Mask Mask			*/
+#define OR_ATM_MSK	0x00007000	/* Address Type Mask Mask		*/
+#define OR_CSNT_SAM	0x00000800	/* Chip Select Negation Time/ Start	*/
+					/* Address Multiplex			*/
+#define OR_ACS_MSK	0x00000600	/* Address to Chip Select Setup mask	*/
+#define OR_ACS_DIV1	0x00000000	/* CS is output at the same time	*/
+#define OR_ACS_DIV4	0x00000400	/* CS is output 1/4 a clock later	*/
+#define OR_ACS_DIV2	0x00000600	/* CS is output 1/2 a clock later	*/
+#define OR_G5LA		0x00000400	/* Output #GPL5 on #GPL_A5		*/
+#define OR_G5LS		0x00000200	/* Drive #GPL high on falling edge of...*/
+#define OR_BI		0x00000100	/* Burst inhibit			*/
+#define OR_SCY_MSK	0x000000f0	/* Cycle Length in Clocks		*/
+#define OR_SCY_0_CLK	0x00000000	/* 0 clock cycles wait states		*/
+#define OR_SCY_1_CLK	0x00000010	/* 1 clock cycles wait states		*/
+#define OR_SCY_2_CLK	0x00000020	/* 2 clock cycles wait states		*/
+#define OR_SCY_3_CLK	0x00000030	/* 3 clock cycles wait states		*/
+#define OR_SCY_4_CLK	0x00000040	/* 4 clock cycles wait states		*/
+#define OR_SCY_5_CLK	0x00000050	/* 5 clock cycles wait states		*/
+#define OR_SCY_6_CLK	0x00000060	/* 6 clock cycles wait states		*/
+#define OR_SCY_7_CLK	0x00000070	/* 7 clock cycles wait states		*/
+#define OR_SCY_8_CLK	0x00000080	/* 8 clock cycles wait states		*/
+#define OR_SCY_9_CLK	0x00000090	/* 9 clock cycles wait states		*/
+#define OR_SCY_10_CLK	0x000000a0	/* 10 clock cycles wait states		*/
+#define OR_SCY_11_CLK	0x000000b0	/* 11 clock cycles wait states		*/
+#define OR_SCY_12_CLK	0x000000c0	/* 12 clock cycles wait states		*/
+#define OR_SCY_13_CLK	0x000000d0	/* 13 clock cycles wait states		*/
+#define OR_SCY_14_CLK	0x000000e0	/* 14 clock cycles wait states		*/
+#define OR_SCY_15_CLK	0x000000f0	/* 15 clock cycles wait states		*/
+#define OR_SETA		0x00000008	/* External Transfer Acknowledge	*/
+#define OR_TRLX		0x00000004	/* Timing Relaxed			*/
+#define OR_EHTR		0x00000002	/* Extended Hold Time on Read		*/
+
+/* System Integration Timers.
+*/
+typedef struct	sys_int_timers {
+	ushort	sit_tbscr;
+	char	res0[0x02];
+	uint	sit_tbreff0;
+	uint	sit_tbreff1;
+	char	res1[0x14];
+	ushort	sit_rtcsc;
+	char	res2[0x02];
+	uint	sit_rtc;
+	uint	sit_rtsec;
+	uint	sit_rtcal;
+	char	res3[0x10];
+	ushort	sit_piscr;
+	char	res4[2];
+	uint	sit_pitc;
+	uint	sit_pitr;
+	char	res5[0x34];
+} sit8xx_t;
+
+#define TBSCR_TBIRQ_MASK	((ushort)0xff00)
+#define TBSCR_REFA		((ushort)0x0080)
+#define TBSCR_REFB		((ushort)0x0040)
+#define TBSCR_REFAE		((ushort)0x0008)
+#define TBSCR_REFBE		((ushort)0x0004)
+#define TBSCR_TBF		((ushort)0x0002)
+#define TBSCR_TBE		((ushort)0x0001)
+
+#define RTCSC_RTCIRQ_MASK	((ushort)0xff00)
+#define RTCSC_SEC		((ushort)0x0080)
+#define RTCSC_ALR		((ushort)0x0040)
+#define RTCSC_38K		((ushort)0x0010)
+#define RTCSC_SIE		((ushort)0x0008)
+#define RTCSC_ALE		((ushort)0x0004)
+#define RTCSC_RTF		((ushort)0x0002)
+#define RTCSC_RTE		((ushort)0x0001)
+
+#define PISCR_PIRQ_MASK		((ushort)0xff00)
+#define PISCR_PS		((ushort)0x0080)
+#define PISCR_PIE		((ushort)0x0004)
+#define PISCR_PTF		((ushort)0x0002)
+#define PISCR_PTE		((ushort)0x0001)
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+	uint	car_sccr;
+	uint	car_plprcr;
+	uint	car_rsr;
+	char	res[0x74];        /* Reserved area                  */
+} car8xx_t;
+
+/* System Integration Timers keys.
+*/
+typedef struct sitk {
+	uint	sitk_tbscrk;
+	uint	sitk_tbreff0k;
+	uint	sitk_tbreff1k;
+	uint	sitk_tbk;
+	char	res1[0x10];
+	uint	sitk_rtcsck;
+	uint	sitk_rtck;
+	uint	sitk_rtseck;
+	uint	sitk_rtcalk;
+	char	res2[0x10];
+	uint	sitk_piscrk;
+	uint	sitk_pitck;
+	char	res3[0x38];
+} sitk8xx_t;
+
+/* Clocks and reset keys.
+*/
+typedef struct cark {
+	uint	cark_sccrk;
+	uint	cark_plprcrk;
+	uint	cark_rsrk;
+	char	res[0x474];
+} cark8xx_t;
+
+/* The key to unlock registers maintained by keep-alive power.
+*/
+#define KAPWR_KEY	((unsigned int)0x55ccaa33)
+
+/* Video interface.  MPC823 Only.
+*/
+typedef struct vid823 {
+	ushort	vid_vccr;
+	ushort	res1;
+	u_char	vid_vsr;
+	u_char	res2;
+	u_char	vid_vcmr;
+	u_char	res3;
+	uint	vid_vbcb;
+	uint	res4;
+	uint	vid_vfcr0;
+	uint	vid_vfaa0;
+	uint	vid_vfba0;
+	uint	vid_vfcr1;
+	uint	vid_vfaa1;
+	uint	vid_vfba1;
+	u_char	res5[0x18];
+} vid823_t;
+
+/* LCD interface.  823 Only.
+*/
+typedef struct lcd {
+	uint	lcd_lccr;
+	uint	lcd_lchcr;
+	uint	lcd_lcvcr;
+	char	res1[4];
+	uint	lcd_lcfaa;
+	uint	lcd_lcfba;
+	char	lcd_lcsr;
+	char	res2[0x7];
+} lcd823_t;
+
+/* I2C
+*/
+typedef struct i2c {
+	u_char	i2c_i2mod;
+	char	res1[3];
+	u_char	i2c_i2add;
+	char	res2[3];
+	u_char	i2c_i2brg;
+	char	res3[3];
+	u_char	i2c_i2com;
+	char	res4[3];
+	u_char	i2c_i2cer;
+	char	res5[3];
+	u_char	i2c_i2cmr;
+	char	res6[0x8b];
+} i2c8xx_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+	char	res1[4];
+	uint	sdma_sdar;
+	u_char	sdma_sdsr;
+	char	res3[3];
+	u_char	sdma_sdmr;
+	char	res4[3];
+	u_char	sdma_idsr1;
+	char	res5[3];
+	u_char	sdma_idmr1;
+	char	res6[3];
+	u_char	sdma_idsr2;
+	char	res7[3];
+	u_char	sdma_idmr2;
+	char	res8[0x13];
+} sdma8xx_t;
+
+/* Communication Processor Module Interrupt Controller.
+*/
+typedef struct cpm_ic {
+	ushort	cpic_civr;
+	char	res[0xe];
+	uint	cpic_cicr;
+	uint	cpic_cipr;
+	uint	cpic_cimr;
+	uint	cpic_cisr;
+} cpic8xx_t;
+
+/* Input/Output Port control/status registers.
+*/
+typedef struct io_port {
+	ushort	iop_padir;
+	ushort	iop_papar;
+	ushort	iop_paodr;
+	ushort	iop_padat;
+	char	res1[8];
+	ushort	iop_pcdir;
+	ushort	iop_pcpar;
+	ushort	iop_pcso;
+	ushort	iop_pcdat;
+	ushort	iop_pcint;
+	char	res2[6];
+	ushort	iop_pddir;
+	ushort	iop_pdpar;
+	char	res3[2];
+	ushort	iop_pddat;
+	uint	utmode;
+	char	res4[4];
+} iop8xx_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+	ushort	cpmt_tgcr;
+	char	res1[0xe];
+	ushort	cpmt_tmr1;
+	ushort	cpmt_tmr2;
+	ushort	cpmt_trr1;
+	ushort	cpmt_trr2;
+	ushort	cpmt_tcr1;
+	ushort	cpmt_tcr2;
+	ushort	cpmt_tcn1;
+	ushort	cpmt_tcn2;
+	ushort	cpmt_tmr3;
+	ushort	cpmt_tmr4;
+	ushort	cpmt_trr3;
+	ushort	cpmt_trr4;
+	ushort	cpmt_tcr3;
+	ushort	cpmt_tcr4;
+	ushort	cpmt_tcn3;
+	ushort	cpmt_tcn4;
+	ushort	cpmt_ter1;
+	ushort	cpmt_ter2;
+	ushort	cpmt_ter3;
+	ushort	cpmt_ter4;
+	char	res2[8];
+} cpmtimer8xx_t;
+
+/* Finally, the Communication Processor stuff.....
+*/
+typedef struct scc {		/* Serial communication channels */
+	uint	scc_gsmrl;
+	uint	scc_gsmrh;
+	ushort	scc_psmr;
+	char	res1[2];
+	ushort	scc_todr;
+	ushort	scc_dsr;
+	ushort	scc_scce;
+	char	res2[2];
+	ushort	scc_sccm;
+	char	res3;
+	u_char	scc_sccs;
+	char	res4[8];
+} scc_t;
+
+typedef struct smc {		/* Serial management channels */
+	char	res1[2];
+	ushort	smc_smcmr;
+	char	res2[2];
+	u_char	smc_smce;
+	char	res3[3];
+	u_char	smc_smcm;
+	char	res4[5];
+} smc_t;
+
+/* MPC860T Fast Ethernet Controller.  It isn't part of the CPM, but
+ * it fits within the address space.
+ */
+
+typedef struct fec {
+	uint	fec_addr_low;		/* lower 32 bits of station address	*/
+	ushort	fec_addr_high;		/* upper 16 bits of station address	*/
+	ushort	res1;			/* reserved				*/
+	uint	fec_grp_hash_table_high;	/* upper 32-bits of hash table		*/
+	uint	fec_grp_hash_table_low;	/* lower 32-bits of hash table		*/
+	uint	fec_r_des_start;	/* beginning of Rx descriptor ring	*/
+	uint	fec_x_des_start;	/* beginning of Tx descriptor ring	*/
+	uint	fec_r_buff_size;	/* Rx buffer size			*/
+	uint	res2[9];		/* reserved				*/
+	uint	fec_ecntrl;		/* ethernet control register		*/
+	uint	fec_ievent;		/* interrupt event register		*/
+	uint	fec_imask;		/* interrupt mask register		*/
+	uint	fec_ivec;		/* interrupt level and vector status	*/
+	uint	fec_r_des_active;	/* Rx ring updated flag			*/
+	uint	fec_x_des_active;	/* Tx ring updated flag			*/
+	uint	res3[10];		/* reserved				*/
+	uint	fec_mii_data;		/* MII data register			*/
+	uint	fec_mii_speed;		/* MII speed control register		*/
+	uint	res4[17];		/* reserved				*/
+	uint	fec_r_bound;		/* end of RAM (read-only)		*/
+	uint	fec_r_fstart;		/* Rx FIFO start address		*/
+	uint	res5[6];		/* reserved				*/
+	uint	fec_x_fstart;		/* Tx FIFO start address		*/
+	uint	res6[17];		/* reserved				*/
+	uint	fec_fun_code;		/* fec SDMA function code		*/
+	uint	res7[3];		/* reserved				*/
+	uint	fec_r_cntrl;		/* Rx control register			*/
+	uint	fec_r_hash;		/* Rx hash register			*/
+	uint	res8[14];		/* reserved				*/
+	uint	fec_x_cntrl;		/* Tx control register			*/
+	uint	res9[0x1e];		/* reserved				*/
+} fec_t;
+
+/* The FEC and LCD color map share the same address space....
+ * I guess we will never see an 823T :-).
+ */
+union fec_lcd {
+	fec_t	fl_un_fec;
+	u_char	fl_un_cmap[0x200];
+};
+
+typedef struct comm_proc {
+	/* General control and status registers.
+	*/
+	ushort	cp_cpcr;
+	u_char	res1[2];
+	ushort	cp_rccr;
+	u_char	res2;
+	u_char	cp_rmds;
+	u_char	res3[4];
+	ushort	cp_cpmcr1;
+	ushort	cp_cpmcr2;
+	ushort	cp_cpmcr3;
+	ushort	cp_cpmcr4;
+	u_char	res4[2];
+	ushort	cp_rter;
+	u_char	res5[2];
+	ushort	cp_rtmr;
+	u_char	res6[0x14];
+
+	/* Baud rate generators.
+	*/
+	uint	cp_brgc1;
+	uint	cp_brgc2;
+	uint	cp_brgc3;
+	uint	cp_brgc4;
+
+	/* Serial Communication Channels.
+	*/
+	scc_t	cp_scc[4];
+
+	/* Serial Management Channels.
+	*/
+	smc_t	cp_smc[2];
+
+	/* Serial Peripheral Interface.
+	*/
+	ushort	cp_spmode;
+	u_char	res7[4];
+	u_char	cp_spie;
+	u_char	res8[3];
+	u_char	cp_spim;
+	u_char	res9[2];
+	u_char	cp_spcom;
+	u_char	res10[2];
+
+	/* Parallel Interface Port.
+	*/
+	u_char	res11[2];
+	ushort	cp_pipc;
+	u_char	res12[2];
+	ushort	cp_ptpr;
+	uint	cp_pbdir;
+	uint	cp_pbpar;
+	u_char	res13[2];
+	ushort	cp_pbodr;
+	uint	cp_pbdat;
+
+	/* Port E - MPC87x/88x only.
+	 */
+	uint	cp_pedir;
+	uint	cp_pepar;
+	uint	cp_peso;
+	uint	cp_peodr;
+	uint	cp_pedat;
+
+	/* Communications Processor Timing Register -
+	   Contains RMII Timing for the FECs on MPC87x/88x only.
+	*/
+	uint	cp_cptr;
+
+	/* Serial Interface and Time Slot Assignment.
+	*/
+	uint	cp_simode;
+	u_char	cp_sigmr;
+	u_char	res15;
+	u_char	cp_sistr;
+	u_char	cp_sicmr;
+	u_char	res16[4];
+	uint	cp_sicr;
+	uint	cp_sirp;
+	u_char	res17[0xc];
+
+	/* 256 bytes of MPC823 video controller RAM array.
+	*/
+	u_char	cp_vcram[0x100];
+	u_char	cp_siram[0x200];
+
+	/* The fast ethernet controller is not really part of the CPM,
+	 * but it resides in the address space.
+	 * The LCD color map is also here.
+	 */
+	union	fec_lcd	fl_un;
+#define cp_fec		fl_un.fl_un_fec
+#define lcd_cmap	fl_un.fl_un_cmap
+	char	res18[0xE00];
+
+	/* The DUET family has a second FEC here */
+	fec_t	cp_fec2;
+#define cp_fec1	cp_fec	/* consistency macro */
+
+	/* Dual Ported RAM follows.
+	 * There are many different formats for this memory area
+	 * depending upon the devices used and options chosen.
+	 * Some processors don't have all of it populated.
+	 */
+	u_char	cp_dpmem[0x1C00];	/* BD / Data / ucode */
+	u_char	cp_dparam[0x400];	/* Parameter RAM */
+} cpm8xx_t;
+
+/* Internal memory map.
+*/
+typedef struct immap {
+	sysconf8xx_t	im_siu_conf;	/* SIU Configuration */
+	pcmconf8xx_t	im_pcmcia;	/* PCMCIA Configuration */
+	memctl8xx_t	im_memctl;	/* Memory Controller */
+	sit8xx_t	im_sit;		/* System integration timers */
+	car8xx_t	im_clkrst;	/* Clocks and reset */
+	sitk8xx_t	im_sitk;	/* Sys int timer keys */
+	cark8xx_t	im_clkrstk;	/* Clocks and reset keys */
+	vid823_t	im_vid;		/* Video (823 only) */
+	lcd823_t	im_lcd;		/* LCD (823 only) */
+	i2c8xx_t	im_i2c;		/* I2C control/status */
+	sdma8xx_t	im_sdma;	/* SDMA control/status */
+	cpic8xx_t	im_cpic;	/* CPM Interrupt Controller */
+	iop8xx_t	im_ioport;	/* IO Port control/status */
+	cpmtimer8xx_t	im_cpmtimer;	/* CPM timers */
+	cpm8xx_t	im_cpm;		/* Communication processor */
+} immap_t;
+
+#endif /* __IMMAP_8XX__ */
+#endif /* __KERNEL__ */
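
A quick orientation note: drivers consume this map by ioremap()ing the IMMR region and dereferencing the typed fields instead of hand-computing register offsets. A minimal sketch, assuming the physical IMMR base has already been discovered elsewhere (the immr_base parameter and read_sipend helper are hypothetical):

	#include <asm/8xx_immap.h>
	#include <asm/io.h>

	/* Illustrative only: read the SIU interrupt pending register. */
	static u32 read_sipend(phys_addr_t immr_base)
	{
		immap_t __iomem *immap = ioremap(immr_base, sizeof(immap_t));
		u32 pending;

		if (!immap)
			return 0;

		/* The struct layout supplies the register offset for us. */
		pending = in_be32(&immap->im_siu_conf.sc_sipend);
		iounmap(immap);
		return pending;
	}
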
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
new file mode 100644
index 0000000..3196d22
--- /dev/null
+++ b/arch/powerpc/include/asm/Kbuild
@@ -0,0 +1,10 @@
+generic-y += div64.h
+generic-y += export.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += rwsem.h
+generic-y += vtime.h
+generic-y += msi.h
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
new file mode 100644
index 0000000..3abcf98
--- /dev/null
+++ b/arch/powerpc/include/asm/accounting.h
@@ -0,0 +1,32 @@
+/*
+ * Common time accounting prototypes and such for all ppc machines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_ACCOUNTING_H
+#define __POWERPC_ACCOUNTING_H
+
+/* Stuff for accurate time accounting */
+struct cpu_accounting_data {
+	/* Accumulated cputime values to flush on ticks*/
+	unsigned long utime;
+	unsigned long stime;
+	unsigned long utime_scaled;
+	unsigned long stime_scaled;
+	unsigned long gtime;
+	unsigned long hardirq_time;
+	unsigned long softirq_time;
+	unsigned long steal_time;
+	unsigned long idle_time;
+	/* Internal counters */
+	unsigned long starttime;	/* TB value snapshot */
+	unsigned long starttime_user;	/* TB value on exit to usermode */
+	unsigned long startspurr;	/* SPURR value snapshot */
+	unsigned long utime_sspurr;	/* ->user_time when ->startspurr set */
+};
+
+#endif
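
The structure is a per-CPU scratch area: fast paths accumulate timebase deltas, and the tick code flushes the accumulators into the scheduler's cputime. A hedged sketch of the accumulate step (the helper name and its timebase argument are illustrative, not the kernel's exact code):

	#include <asm/accounting.h>

	/* Illustrative only: fold user time since the last snapshot into utime. */
	static void acct_user_delta(struct cpu_accounting_data *acct,
				    unsigned long now_tb)
	{
		acct->utime += now_tb - acct->starttime_user; /* grow accumulator */
		acct->starttime_user = now_tb;                /* re-snapshot TB */
	}
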
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
new file mode 100644
index 0000000..b29b118
--- /dev/null
+++ b/arch/powerpc/include/asm/agp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_AGP_H
+#define _ASM_POWERPC_AGP_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#define map_page_into_agp(page)
+#define unmap_page_from_agp(page)
+#define flush_agp_cache() mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)		\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_AGP_H */
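
The generic AGP core is the intended consumer of these hooks; on powerpc the page-mapping hooks are no-ops and only the final mb() matters. A small usage sketch (ex_gatt_setup is invented for illustration):

	#include <asm/agp.h>
	#include <linux/gfp.h>

	static char *ex_gatt_setup(int order)
	{
		char *table = alloc_gatt_pages(order);	/* kernel virtual address */

		if (!table)
			return NULL;
		/* ... fill in GATT entries ... */
		flush_agp_cache();			/* just mb() on powerpc */
		return table;
	}

	/* Teardown mirrors it: free_gatt_pages(table, order); */
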
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 0000000..9c63b59
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+	return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+	return 0;
+}
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+	if (ppc_md.get_random_seed)
+		return ppc_md.get_random_seed(v);
+
+	return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
+	unsigned long val;
+	int rc;
+
+	/* Must use the seed source: arch_get_random_long() above always fails. */
+	rc = arch_get_random_seed_long(&val);
+	if (rc)
+		*v = val;
+
+	return rc;
+}
+
+static inline int arch_has_random(void)
+{
+	return 0;
+}
+
+static inline int arch_has_random_seed(void)
+{
+	return !!ppc_md.get_random_seed;
+}
+#endif /* CONFIG_ARCH_RANDOM */
+
+#ifdef CONFIG_PPC_POWERNV
+int powernv_hwrng_present(void);
+int powernv_get_random_long(unsigned long *v);
+int powernv_get_random_real_mode(unsigned long *v);
+#else
+static inline int powernv_hwrng_present(void) { return 0; }
+static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; }
+#endif
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
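
Callers must treat the seed interface as best-effort: it succeeds only when the platform registered a source in ppc_md. A hedged sketch (requires CONFIG_ARCH_RANDOM; the fallback policy belongs to the caller, not this header, and ex_get_seed is invented):

	#include <asm/archrandom.h>

	static unsigned long ex_get_seed(void)
	{
		unsigned long seed;

		if (arch_get_random_seed_long(&seed))
			return seed;	/* hardware/firmware-provided entropy */

		return 0;		/* caller falls back to another source */
	}
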
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
new file mode 100644
index 0000000..7270d3a
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-405.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_POWERPC_ASM_405_H
+#define _ASM_POWERPC_ASM_405_H
+
+#include <asm/asm-const.h>
+
+#ifdef __KERNEL__
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx.  The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb)	stringify_in_c(dcbt	ra, rb;)
+#define	PPC405_ERR77_SYNC	stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+#endif
+
+#endif /* _ASM_POWERPC_ASM_405_H */
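
The macros are meant to be pasted into inline-asm templates immediately before each stwcx., so the workaround vanishes on unaffected CPUs; atomic.h later in this snapshot uses them exactly this way. A stripped-down illustrative sketch (an atomic add, not kernel code):

	#include <asm/asm-405.h>

	static inline void ex_atomic_add(unsigned int *p, unsigned int val)
	{
		unsigned int tmp;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%2\n"	/* load word and reserve */
	"	add	%0,%0,%3\n"
		PPC405_ERR77(0, %2)	/* dcbt on 405, empty elsewhere */
	"	stwcx.	%0,0,%2\n"	/* store conditional */
	"	bne-	1b"		/* retry if the reservation was lost */
		: "=&r" (tmp), "+m" (*p)
		: "r" (p), "r" (val)
		: "cc");
	}
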
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
new file mode 100644
index 0000000..19b70c5
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_POWERPC_ASM_COMPAT_H
+#define _ASM_POWERPC_ASM_COMPAT_H
+
+#include <asm/asm-const.h>
+#include <asm/types.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef __powerpc64__
+
+/* operations for longs and pointers */
+#define PPC_LL		stringify_in_c(ld)
+#define PPC_STL		stringify_in_c(std)
+#define PPC_STLU	stringify_in_c(stdu)
+#define PPC_LCMPI	stringify_in_c(cmpdi)
+#define PPC_LCMPLI	stringify_in_c(cmpldi)
+#define PPC_LCMP	stringify_in_c(cmpd)
+#define PPC_LONG	stringify_in_c(.8byte)
+#define PPC_LONG_ALIGN	stringify_in_c(.balign 8)
+#define PPC_TLNEI	stringify_in_c(tdnei)
+#define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
+#define PPC_STLCX	stringify_in_c(stdcx.)
+#define PPC_CNTLZL	stringify_in_c(cntlzd)
+#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
+#define PPC_LR_STKOFF	16
+#define PPC_MIN_STKFRM	112
+
+#ifdef __BIG_ENDIAN__
+#define LHZX_BE	stringify_in_c(lhzx)
+#define LWZX_BE	stringify_in_c(lwzx)
+#define LDX_BE	stringify_in_c(ldx)
+#define STWX_BE	stringify_in_c(stwx)
+#define STDX_BE	stringify_in_c(stdx)
+#else
+#define LHZX_BE	stringify_in_c(lhbrx)
+#define LWZX_BE	stringify_in_c(lwbrx)
+#define LDX_BE	stringify_in_c(ldbrx)
+#define STWX_BE	stringify_in_c(stwbrx)
+#define STDX_BE	stringify_in_c(stdbrx)
+#endif
+
+#else /* 32-bit */
+
+/* operations for longs and pointers */
+#define PPC_LL		stringify_in_c(lwz)
+#define PPC_STL		stringify_in_c(stw)
+#define PPC_STLU	stringify_in_c(stwu)
+#define PPC_LCMPI	stringify_in_c(cmpwi)
+#define PPC_LCMPLI	stringify_in_c(cmplwi)
+#define PPC_LCMP	stringify_in_c(cmpw)
+#define PPC_LONG	stringify_in_c(.long)
+#define PPC_LONG_ALIGN	stringify_in_c(.balign 4)
+#define PPC_TLNEI	stringify_in_c(twnei)
+#define PPC_LLARX(t, a, b, eh)	PPC_LWARX(t, a, b, eh)
+#define PPC_STLCX	stringify_in_c(stwcx.)
+#define PPC_CNTLZL	stringify_in_c(cntlzw)
+#define PPC_MTOCRF	stringify_in_c(mtcrf)
+#define PPC_LR_STKOFF	4
+#define PPC_MIN_STKFRM	16
+
+#endif
+
+#endif /* _ASM_POWERPC_ASM_COMPAT_H */
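
These aliases let a single inline-asm body serve both word sizes. A hedged sketch of a pointer-sized load that assembles as lwz on ppc32 and ld on ppc64 (ex_load_native is invented):

	#include <asm/asm-compat.h>

	static inline unsigned long ex_load_native(unsigned long *p)
	{
		unsigned long v;

		/* PPC_LL expands to "lwz" (32-bit) or "ld" (64-bit). */
		__asm__ __volatile__(PPC_LL " %0,0(%1)"
				     : "=r" (v) : "b" (p), "m" (*p));
		return v;
	}
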
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
new file mode 100644
index 0000000..082c153
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_ASM_CONST_H
+#define _ASM_POWERPC_ASM_CONST_H
+
+#ifdef __ASSEMBLY__
+#  define stringify_in_c(...)	__VA_ARGS__
+#  define ASM_CONST(x)		x
+#else
+/* This version of stringify will deal with commas... */
+#  define __stringify_in_c(...)	#__VA_ARGS__
+#  define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
+#  define __ASM_CONST(x)	x##UL
+#  define ASM_CONST(x)		__ASM_CONST(x)
+#endif
+#endif /* _ASM_POWERPC_ASM_CONST_H */
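
The split definitions exist so one header line can feed both the assembler and C. A brief illustration (both macro names below are made up):

	#include <asm/asm-const.h>

	/* ASM_CONST: 0x...UL when compiled as C, a bare constant in .S files. */
	#define EX_FLAG		ASM_CONST(0x8000000000000000)

	/* stringify_in_c() survives the comma: becomes "li 10,1; " in C context. */
	#define EX_LOAD_R10	stringify_in_c(li 10,1;)
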
diff --git a/arch/powerpc/include/asm/asm-offsets.h b/arch/powerpc/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..1f4691c
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -0,0 +1,153 @@
+#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
+#define _ASM_POWERPC_ASM_PROTOTYPES_H
+/*
+ * This file is for prototypes of C functions that are only called
+ * from asm, and any associated variables.
+ *
+ * Copyright 2016, Daniel Axtens, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <linux/uaccess.h>
+#include <asm/epapr_hcalls.h>
+#include <asm/dcr.h>
+#include <asm/mmu_context.h>
+
+#include <uapi/asm/ucontext.h>
+
+/* SMP */
+extern struct thread_info *current_set[NR_CPUS];
+extern struct thread_info *secondary_ti;
+void start_secondary(void *unused);
+
+/* kexec */
+struct paca_struct;
+struct kimage;
+extern struct paca_struct kexec_paca;
+void kexec_copy_flush(struct kimage *image);
+
+/* pseries hcall tracing */
+extern struct static_key hcall_tracepoint_key;
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
+/* OPAL tracing */
+#ifdef HAVE_JUMP_LABEL
+extern struct static_key opal_tracepoint_key;
+#endif
+
+void __trace_opal_entry(unsigned long opcode, unsigned long *args);
+void __trace_opal_exit(long opcode, unsigned long retval);
+
+/* VMX copying */
+int enter_vmx_usercopy(void);
+int exit_vmx_usercopy(void);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
+
+/* Traps */
+long machine_check_early(struct pt_regs *regs);
+long hmi_exception_realmode(struct pt_regs *regs);
+void SMIException(struct pt_regs *regs);
+void handle_hmi_exception(struct pt_regs *regs);
+void instruction_breakpoint_exception(struct pt_regs *regs);
+void RunModeException(struct pt_regs *regs);
+void single_step_exception(struct pt_regs *regs);
+void program_check_exception(struct pt_regs *regs);
+void alignment_exception(struct pt_regs *regs);
+void slb_miss_bad_addr(struct pt_regs *regs);
+void StackOverflow(struct pt_regs *regs);
+void nonrecoverable_exception(struct pt_regs *regs);
+void kernel_fp_unavailable_exception(struct pt_regs *regs);
+void altivec_unavailable_exception(struct pt_regs *regs);
+void vsx_unavailable_exception(struct pt_regs *regs);
+void fp_unavailable_tm(struct pt_regs *regs);
+void altivec_unavailable_tm(struct pt_regs *regs);
+void vsx_unavailable_tm(struct pt_regs *regs);
+void facility_unavailable_exception(struct pt_regs *regs);
+void TAUException(struct pt_regs *regs);
+void altivec_assist_exception(struct pt_regs *regs);
+void unrecoverable_exception(struct pt_regs *regs);
+void kernel_bad_stack(struct pt_regs *regs);
+void system_reset_exception(struct pt_regs *regs);
+void machine_check_exception(struct pt_regs *regs);
+void emulation_assist_interrupt(struct pt_regs *regs);
+
+/* signals, syscalls and interrupts */
+long sys_swapcontext(struct ucontext __user *old_ctx,
+		    struct ucontext __user *new_ctx,
+		    long ctx_size);
+#ifdef CONFIG_PPC32
+long sys_debug_setcontext(struct ucontext __user *ctx,
+			  int ndbg, struct sig_dbg_op __user *dbg);
+int ppc_select(int n, fd_set __user *inp, fd_set __user *outp,
+	       fd_set __user *exp, struct timeval __user *tvp);
+unsigned long __init early_init(unsigned long dt_ptr);
+void __init machine_init(u64 dt_ptr);
+#endif
+
+long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
+		      u32 len_high, u32 len_low);
+long sys_switch_endian(void);
+notrace unsigned int __check_irq_replay(void);
+void notrace restore_interrupts(void);
+
+/* ptrace */
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+/* process */
+void restore_math(struct pt_regs *regs);
+void restore_tm_state(struct pt_regs *regs);
+
+/* prom_init (OpenFirmware) */
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+			       unsigned long pp,
+			       unsigned long r6, unsigned long r7,
+			       unsigned long kbase);
+
+/* setup */
+void __init early_setup(unsigned long dt_ptr);
+void early_setup_secondary(void);
+
+/* time */
+void accumulate_stolen_time(void);
+
+/* misc runtime */
+extern u64 __bswapdi2(u64);
+extern s64 __lshrdi3(s64, int);
+extern s64 __ashldi3(s64, int);
+extern s64 __ashrdi3(s64, int);
+extern int __cmpdi2(s64, s64);
+extern int __ucmpdi2(u64, u64);
+
+/* tracing */
+void _mcount(void);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+
+void pnv_power9_force_smt4_catch(void);
+void pnv_power9_force_smt4_release(void);
+
+/* Transaction memory related */
+void tm_enable(void);
+void tm_disable(void);
+void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_count_cache;
+
+#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/async_tx.h b/arch/powerpc/include/asm/async_tx.h
new file mode 100644
index 0000000..8b2dc55
--- /dev/null
+++ b/arch/powerpc/include/asm/async_tx.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008-2009 DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef _ASM_POWERPC_ASYNC_TX_H_
+#define _ASM_POWERPC_ASYNC_TX_H_
+
+#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
+extern struct dma_chan *
+ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
+	struct page **dst_lst, int dst_cnt, struct page **src_lst,
+	int src_cnt, size_t src_sz);
+
+#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
+			      src_cnt, src_sz) \
+	ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
+					     src_cnt, src_sz)
+#else
+
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+	__async_tx_find_channel(dep, type)
+
+struct dma_chan *
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type);
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
new file mode 100644
index 0000000..52eafaf
--- /dev/null
+++ b/arch/powerpc/include/asm/atomic.h
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/asm-405.h>
+
+#define ATOMIC_INIT(i)		{ (i) }
+
+/*
+ * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
+ * a "bne-" instruction at the end, an isync is enough as an acquire
+ * barrier on platforms without lwsync.
+ */
+#define __atomic_acquire_fence()					\
+	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence()					\
+	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
+
+static __inline__ int atomic_read(const atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+	return t;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+#define ATOMIC_OP(op, asm_op)						\
+static __inline__ void atomic_##op(int a, atomic_t *v)			\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+	PPC405_ERR77(0,%3)						\
+"	stwcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
+static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
+	#asm_op " %0,%2,%0\n"						\
+	PPC405_ERR77(0, %3)						\
+"	stwcx.	%0,0,%3\n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+									\
+	return t;							\
+}
+
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
+static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
+{									\
+	int res, t;							\
+									\
+	__asm__ __volatile__(						\
+"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
+	#asm_op " %1,%3,%0\n"						\
+	PPC405_ERR77(0, %4)						\
+"	stwcx.	%1,0,%4\n"						\
+"	bne-	1b\n"							\
+	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+									\
+	return res;							\
+}
+
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
+
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP_RELAXED
+#undef ATOMIC_OP_RETURN_RELAXED
+#undef ATOMIC_OP
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_inc\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+}
+#define atomic_inc atomic_inc
+
+static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
+"	addic	%0,%0,1\n"
+	PPC405_ERR77(0, %2)
+"	stwcx.	%0,0,%2\n"
+"	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+
+	return t;
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_dec\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%2)\
+"	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+}
+#define atomic_dec atomic_dec
+
+static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
+"	addic	%0,%0,-1\n"
+	PPC405_ERR77(0, %2)
+"	stwcx.	%0,0,%2\n"
+"	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+
+	return t;
+}
+
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg_relaxed(v, o, n) \
+	cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define atomic_cmpxchg_acquire(v, o, n) \
+	cmpxchg_acquire(&((v)->counter), (o), (n))
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
+/**
+ * atomic_fetch_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	int t;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
+	cmpw	0,%0,%3 \n\
+	beq	2f \n\
+	add	%0,%2,%0 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b \n"
+	PPC_ATOMIC_EXIT_BARRIER
+"	subf	%0,%2,%0 \n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (a), "r" (u)
+	: "cc", "memory");
+
+	return t;
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ int atomic_inc_not_zero(atomic_t *v)
+{
+	int t1, t2;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
+	cmpwi	0,%0,0\n\
+	beq-	2f\n\
+	addic	%1,%0,1\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%1,0,%2\n\
+	bne-	1b\n"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"
+	: "=&r" (t1), "=&r" (t2)
+	: "r" (&v->counter)
+	: "cc", "xer", "memory");
+
+	return t1;
+}
+#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
+	cmpwi	%0,1\n\
+	addi	%0,%0,-1\n\
+	blt-	2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"	: "=&b" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+
+#ifdef __powerpc64__
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+static __inline__ long atomic64_read(const atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+	return t;
+}
+
+static __inline__ void atomic64_set(atomic64_t *v, long i)
+{
+	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+#define ATOMIC64_OP(op, asm_op)						\
+static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+"	stdcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+}
+
+#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
+static inline long							\
+atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
+	#asm_op " %0,%2,%0\n"						\
+"	stdcx.	%0,0,%3\n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+									\
+	return t;							\
+}
+
+#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
+static inline long							\
+atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
+{									\
+	long res, t;							\
+									\
+	__asm__ __volatile__(						\
+"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
+	#asm_op " %1,%3,%0\n"						\
+"	stdcx.	%1,0,%4\n"						\
+"	bne-	1b\n"							\
+	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+									\
+	return res;							\
+}
+
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
+	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(or, or)
+ATOMIC64_OPS(xor, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP_RELAXED
+#undef ATOMIC64_OP_RETURN_RELAXED
+#undef ATOMIC64_OP
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+}
+#define atomic64_inc atomic64_inc
+
+static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
+"	addic	%0,%0,1\n"
+"	stdcx.	%0,0,%2\n"
+"	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+
+	return t;
+}
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+}
+#define atomic64_dec atomic64_dec
+
+static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
+"	addic	%0,%0,-1\n"
+"	stdcx.	%0,0,%2\n"
+"	bne-	1b"
+	: "=&r" (t), "+m" (v->counter)
+	: "r" (&v->counter)
+	: "cc", "xer");
+
+	return t;
+}
+
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "xer", "memory");
+
+	return t;
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_cmpxchg_relaxed(v, o, n) \
+	cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define atomic64_cmpxchg_acquire(v, o, n) \
+	cmpxchg_acquire(&((v)->counter), (o), (n))
+
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
+/**
+ * atomic64_fetch_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+{
+	long t;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
+	cmpd	0,%0,%3 \n\
+	beq	2f \n\
+	add	%0,%2,%0 \n"
+"	stdcx.	%0,0,%1 \n\
+	bne-	1b \n"
+	PPC_ATOMIC_EXIT_BARRIER
+"	subf	%0,%2,%0 \n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (a), "r" (u)
+	: "cc", "memory");
+
+	return t;
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+{
+	long t1, t2;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
+	cmpdi	0,%0,0\n\
+	beq-	2f\n\
+	addic	%1,%0,1\n\
+	stdcx.	%1,0,%2\n\
+	bne-	1b\n"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"
+	: "=&r" (t1), "=&r" (t2)
+	: "r" (&v->counter)
+	: "cc", "xer", "memory");
+
+	return t1 != 0;
+}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+
+#endif /* __powerpc64__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
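
The conditional primitives at the bottom are the usual building blocks for reference counting. A hedged sketch of a get/put pair layered on them (the struct and function names are invented):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct ex_obj { atomic_t refs; };

	static bool ex_get(struct ex_obj *o)
	{
		/* Fails once the count has already reached zero. */
		return atomic_inc_not_zero(&o->refs) != 0;
	}

	static void ex_put(struct ex_obj *o)
	{
		if (atomic_dec_and_test(&o->refs))	/* built on the dec ops above */
			kfree(o);
	}
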
diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h
new file mode 100644
index 0000000..1b5eab6
--- /dev/null
+++ b/arch/powerpc/include/asm/backlight.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Routines for handling backlight control on PowerBooks
+ *
+ * For now, implementation resides in
+ * arch/powerpc/platforms/powermac/backlight.c
+ *
+ */
+#ifndef __ASM_POWERPC_BACKLIGHT_H
+#define __ASM_POWERPC_BACKLIGHT_H
+#ifdef __KERNEL__
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+
+/* For locking instructions, see the implementation file */
+extern struct backlight_device *pmac_backlight;
+extern struct mutex pmac_backlight_mutex;
+
+extern int pmac_backlight_curve_lookup(struct fb_info *info, int value);
+
+extern int pmac_has_backlight_type(const char *type);
+
+extern void pmac_backlight_key(int direction);
+static inline void pmac_backlight_key_up(void)
+{
+	pmac_backlight_key(0);
+}
+static inline void pmac_backlight_key_down(void)
+{
+	pmac_backlight_key(1);
+}
+
+extern void pmac_backlight_set_legacy_brightness_pmu(int brightness);
+extern int pmac_backlight_set_legacy_brightness(int brightness);
+extern int pmac_backlight_get_legacy_brightness(void);
+
+extern void pmac_backlight_enable(void);
+extern void pmac_backlight_disable(void);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 0000000..fbe8df4
--- /dev/null
+++ b/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+#include <asm/asm-const.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ *	across this point (nop on PPC).
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores.  Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+
+/* The sub-arch has lwsync */
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#    define SMPWMB      LWSYNC
+#else
+#    define SMPWMB      eieio
+#endif
+
+#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define dma_rmb()	__lwsync()
+#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+
+#define __smp_lwsync()	__lwsync()
+
+#define __smp_mb()	mb()
+#define __smp_rmb()	__lwsync()
+#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known.  For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x)	\
+	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+#define __smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_lwsync();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_lwsync();							\
+	___p1;								\
+})
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT   nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT   nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+/*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
+
+// This also acts as a compiler barrier due to the memory clobber.
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
+#define barrier_nospec_asm
+#define barrier_nospec()
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#include <asm-generic/barrier.h>
+
+#endif /* _ASM_POWERPC_BARRIER_H */
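
The release/acquire pair is the idiomatic way to publish data to another CPU: lwsync before the store on the producer side, lwsync after the load on the consumer side. A hedged sketch using the generic wrappers layered over these definitions (the ex_* names are invented):

	#include <asm/barrier.h>

	static int ex_data;
	static int ex_ready;

	/* Producer: ex_data is globally visible before the flag (lwsync; stw). */
	static void ex_publish(int v)
	{
		ex_data = v;
		smp_store_release(&ex_ready, 1);
	}

	/* Consumer: the flag load is ordered before the data load (lwz; lwsync). */
	static int ex_consume(void)
	{
		return smp_load_acquire(&ex_ready) ? ex_data : -1;
	}
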
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
new file mode 100644
index 0000000..ff71566
--- /dev/null
+++ b/arch/powerpc/include/asm/bitops.h
@@ -0,0 +1,257 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit arrays layouts, but
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) of the bit
+ * number need to be reversed compared to the big-endian bit fields.
+ * This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
+#include <asm/asm-405.h>
+
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)			\
+	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
+#define PPC_BITLSHIFT32(be)	(32 - 1 - (be))
+#define PPC_BIT32(bit)		(1UL << PPC_BITLSHIFT32(bit))
+#define PPC_BITMASK32(bs, be)	((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+
+#define PPC_BITLSHIFT8(be)	(8 - 1 - (be))
+#define PPC_BIT8(bit)		(1UL << PPC_BITLSHIFT8(bit))
+#define PPC_BITMASK8(bs, be)	((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
+
+#include <asm/barrier.h>
+
+/* Macro for generating the ***_bits() functions */
+#define DEFINE_BITOP(fn, op, prefix)		\
+static __inline__ void fn(unsigned long mask,	\
+		volatile unsigned long *_p)	\
+{						\
+	unsigned long old;			\
+	unsigned long *p = (unsigned long *)_p;	\
+	__asm__ __volatile__ (			\
+	prefix					\
+"1:"	PPC_LLARX(%0,0,%3,0) "\n"		\
+	stringify_in_c(op) "%0,%0,%2\n"		\
+	PPC405_ERR77(0,%3)			\
+	PPC_STLCX "%0,0,%3\n"			\
+	"bne- 1b\n"				\
+	: "=&r" (old), "+m" (*p)		\
+	: "r" (mask), "r" (p)			\
+	: "cc", "memory");			\
+}
+
+DEFINE_BITOP(set_bits, or, "")
+DEFINE_BITOP(clear_bits, andc, "")
+DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
+DEFINE_BITOP(change_bits, xor, "")
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
+}
+
+/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
+ * operands. */
+#define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
+static __inline__ unsigned long fn(			\
+		unsigned long mask,			\
+		volatile unsigned long *_p)		\
+{							\
+	unsigned long old, t;				\
+	unsigned long *p = (unsigned long *)_p;		\
+	__asm__ __volatile__ (				\
+	prefix						\
+"1:"	PPC_LLARX(%0,0,%3,eh) "\n"			\
+	stringify_in_c(op) "%1,%0,%2\n"			\
+	PPC405_ERR77(0,%3)				\
+	PPC_STLCX "%1,0,%3\n"				\
+	"bne- 1b\n"					\
+	postfix						\
+	: "=&r" (old), "=&r" (t)			\
+	: "r" (mask), "r" (p)				\
+	: "cc", "memory");				\
+	return (old & mask);				\
+}
+
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits_lock, or, "",
+	      PPC_ACQUIRE_BARRIER, 1)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+
+static __inline__ int test_and_set_bit(unsigned long nr,
+				       volatile unsigned long *addr)
+{
+	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+static __inline__ int test_and_set_bit_lock(unsigned long nr,
+				       volatile unsigned long *addr)
+{
+	return test_and_set_bits_lock(BIT_MASK(nr),
+				addr + BIT_WORD(nr)) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr,
+					 volatile unsigned long *addr)
+{
+	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr,
+					  volatile unsigned long *addr)
+{
+	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
+						volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+
+	__asm__ __volatile__ (
+	PPC_RELEASE_BARRIER
+"1:"	PPC_LLARX(%0,0,%3,0) "\n"
+	"andc %1,%0,%2\n"
+	PPC405_ERR77(0,%3)
+	PPC_STLCX "%1,0,%3\n"
+	"bne- 1b\n"
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return old;
+}
+
+/* This is a special function for mm/filemap.c */
+#define clear_bit_unlock_is_negative_byte(nr, addr)			\
+	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))
+
+#endif /* CONFIG_PPC64 */
+
+#include <asm-generic/bitops/non-atomic.h>
+
+static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
+	__clear_bit(nr, addr);
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in a double word.
+ */
+#define __ilog2(x)	ilog2(x)
+
+#include <asm-generic/bitops/ffz.h>
+
+#include <asm-generic/bitops/builtin-__ffs.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __inline__ int fls(unsigned int x)
+{
+	return 32 - __builtin_clz(x);
+}
+
+#include <asm-generic/bitops/builtin-__fls.h>
+
+static __inline__ int fls64(__u64 x)
+{
+	return 64 - __builtin_clzll(x);
+}
+
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
+#include <asm-generic/bitops/hweight.h>
+#endif
+
+#include <asm-generic/bitops/find.h>
+
+/* Little-endian versions */
+#include <asm-generic/bitops/le.h>
+
+/* Bitmap functions for the ext2 filesystem */
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
new file mode 100644
index 0000000..f2892c7
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
+#define _ASM_POWERPC_BOOK3S_32_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings.  We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code.  Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
+#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
+#define _PAGE_USER	0x004	/* usermode access allowed */
+#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
+#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
+#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
+#define _PAGE_DIRTY	0x080	/* C: page changed */
+#define _PAGE_ACCESSED	0x100	/* R: page referenced */
+#define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_SPECIAL	0x800	/* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
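+
+/*
+ * Illustrative note (not in the original): the pte_none() check in
+ * book3s/32/pgtable.h masks with ~_PTE_NONE_MASK, so a cleared PTE that
+ * still carries _PAGE_HASHPTE (0x002) -- and, with CONFIG_PTE_64BIT, any
+ * high-word bits -- continues to test as none.
+ */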
+
+#define _PMD_PRESENT	0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD	(~PAGE_MASK)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
new file mode 100644
index 0000000..e38c913
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+/*
+ * 32-bit hash table MMU support
+ */
+
+/*
+ * BATs
+ */
+
+/* Block size masks */
+#define BL_128K	0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M   0x007
+#define BL_2M   0x00F
+#define BL_4M   0x01F
+#define BL_8M   0x03F
+#define BL_16M  0x07F
+#define BL_32M  0x0FF
+#define BL_64M  0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
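+
+/*
+ * Illustrative note (not in the original): each BL value is
+ * (block size / 128K) - 1, i.e. a mask of the address bits that fall
+ * inside the block; e.g. BL_8M == (8M / 128K) - 1 == 0x03F.
+ */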
+
+/* BAT Access Protection */
+#define BPP_XX	0x00		/* No access */
+#define BPP_RX	0x01		/* Read only */
+#define BPP_RW	0x02		/* Read/write */
+
+#ifndef __ASSEMBLY__
+/* Contort a phys_addr_t into the right format/bits for a BAT */
+#ifdef CONFIG_PHYS_64BIT
+#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
+				((x & 0x0000000e00000000ULL) >> 24) | \
+				((x & 0x0000000100000000ULL) >> 30)))
+#else
+#define BAT_PHYS_ADDR(x) (x)
+#endif
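+
+/*
+ * Worked example (illustrative): with CONFIG_PHYS_64BIT, a 36-bit physical
+ * address keeps bits 17-31 in place and folds the extended bits down, e.g.
+ *
+ *	BAT_PHYS_ADDR(0x0000000120000000ULL)
+ *		== 0x20000000			   (bits 17-31 unchanged)
+ *		 | (0x0000000100000000ULL >> 30)   (phys bit 32 -> 0x4)
+ *		== 0x20000004
+ */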
+
+struct ppc_bat {
+	u32 batu;
+	u32 batl;
+};
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Hash table
+ */
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX	0	/* Supervisor read/write, User none */
+#define PP_RWRX 1	/* Supervisor read/write, User read */
+#define PP_RWRW 2	/* Supervisor read/write, User read/write */
+#define PP_RXRX 3	/* Supervisor read,       User read */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Hardware Page Table Entry
+ * Note that the xpn and x bitfields are used only by processors that
+ * support extended addressing; otherwise, those bits are reserved.
+ */
+struct hash_pte {
+	unsigned long v:1;	/* Entry is valid */
+	unsigned long vsid:24;	/* Virtual segment identifier */
+	unsigned long h:1;	/* Hash algorithm indicator */
+	unsigned long api:6;	/* Abbreviated page index */
+	unsigned long rpn:20;	/* Real (physical) page number */
+	unsigned long xpn:3;	/* Real page number bits 0-2, optional */
+	unsigned long r:1;	/* Referenced */
+	unsigned long c:1;	/* Changed */
+	unsigned long w:1;	/* Write-thru cache mode */
+	unsigned long i:1;	/* Cache inhibited */
+	unsigned long m:1;	/* Memory coherence */
+	unsigned long g:1;	/* Guarded */
+	unsigned long x:1;	/* Real page number bit 3, optional */
+	unsigned long pp:2;	/* Page protection */
+};
+
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/* We happily ignore the smaller BATs on 601, since we don't actually use
+ * those definitions on hash32 at the moment anyway
+ */
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
+#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
new file mode 100644
index 0000000..82e44b1
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/slab.h>
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
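+
+/*
+ * Illustrative example (not in the original): pgtable_free_tlb() below packs
+ * the index_size into the low bits of the aligned table pointer, so a table
+ * at 0x...c000 freed with shift 5 travels as 0x...c005, and
+ * __tlb_remove_table() separates the two again with ~MAX_PGTABLE_INDEX_SIZE
+ * and MAX_PGTABLE_INDEX_SIZE masks.
+ */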
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+/* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) 		do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)		do { } while (0)
+/* #define pgd_populate(mm, pmd, pte)      BUG() */
+
+#ifndef CONFIG_BOOKE
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+				       pte_t *pte)
+{
+	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pte_page)
+{
+	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#else
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+				       pte_t *pte)
+{
+	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pte_page)
+{
+	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+	if (!index_size) {
+		pgtable_page_dtor(virt_to_page(table));
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
+}
+
+#define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+				    void *table, int shift)
+{
+	unsigned long pgf = (unsigned long)table;
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+				    void *table, int shift)
+{
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	pgtable_free_tlb(tlb, page_address(table), 0);
+}
+#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
new file mode 100644
index 0000000..751cf93
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -0,0 +1,506 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nopmd.h>
+
+#include <asm/book3s/32/hash.h>
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
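+
+/*
+ * Worked example (illustrative, assuming 4K pages and 32-bit PTEs so that
+ * PTE_SHIFT == 10): PGDIR_SHIFT == 12 + 10 == 22, PGDIR_SIZE == 4MB and
+ * PTRS_PER_PGD == 1 << (32 - 22) == 1024, i.e. the one-page 1024-entry
+ * pgdir described above.
+ */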
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start laying out the
+ * kernel virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP	PKMAP_BASE
+#else
+#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP	KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_END	ioremap_bot
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+extern unsigned long ioremap_bot;
+
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
+
+#define pte_ERROR(e) \
+	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+		(unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
+#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	*pmdp = __pmd(0);
+}
+
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+			    unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+			  unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+				       unsigned long clr,
+				       unsigned long set)
+{
+	unsigned long old, tmp;
+
+	__asm__ __volatile__("\
+1:	lwarx	%0,0,%3\n\
+	andc	%1,%0,%4\n\
+	or	%1,%1,%5\n"
+"	stwcx.	%1,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+
+	return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+					    unsigned long clr,
+					    unsigned long set)
+{
+	unsigned long long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__("\
+1:	lwarx	%L0,0,%4\n\
+	lwzx	%0,0,%3\n\
+	andc	%1,%L0,%5\n\
+	or	%1,%1,%6\n"
+"	stwcx.	%1,0,%4\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+
+	return old;
+}
+#endif /* CONFIG_PTE_64BIT */
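+
+/*
+ * Illustrative C equivalent of the lwarx/stwcx. loops above (a sketch only;
+ * the real code relies on the reservation, not an explicit compare):
+ *
+ *	do {
+ *		old = *p;
+ *		new = (old & ~clr) | set;
+ *	} while (cmpxchg(p, old, new) != old);
+ *	return old;
+ */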
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, so we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+	old = pte_update(ptep, _PAGE_ACCESSED, 0);
+	if (old & _PAGE_HASHPTE) {
+		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+		flush_hash_pages(context, addr, ptephys, 1);
+	}
+	return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep)
+{
+	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
+{
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   int psize)
+{
+	unsigned long set = pte_val(entry) &
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+	pte_update(ptep, clr, set);
+
+	flush_tlb_page(vma, address);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd)	\
+	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)		\
+	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd)	\
+	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)		\
+	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)		\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)	\
+	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)		kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ *   -- paulus
+ */
+#define __swp_type(entry)		((entry).val & 0x1f)
+#define __swp_offset(entry)		((entry).val >> 5)
+#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
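+
+/*
+ * Worked example (illustrative): __swp_entry(2, 0x10) gives val
+ * 2 | (0x10 << 5) == 0x202, and __swp_entry_to_pte() shifts it left by 3 to
+ * 0x1010, so _PAGE_PRESENT (0x001) and _PAGE_HASHPTE (0x002) stay clear as
+ * the note above requires.
+ */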
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_read(pte_t pte)		{ return 1; }
+static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * We only find page table entry in the last level
+ * Hence no need for other accessors
+ */
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+	unsigned long pteval = pte_val(pte);
+	/*
+	 * A read-only access is controlled by the _PAGE_USER bit.
+	 * We have _PAGE_READ set for WRITE and EXECUTE
+	 */
+	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
+
+	if (write)
+		need_pte_bits |= _PAGE_WRITE;
+
+	if ((pteval & need_pte_bits) != need_pte_bits)
+		return false;
+
+	return true;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+	 * helper pte_update() which does an atomic update. We need to do that
+	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+	 * the hash bits instead (ie, same as the non-SMP case)
+	 */
+	if (percpu)
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	else
+		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE.  In this case, we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between. This is possible because we take care,
+	 * in the hash code, to pre-invalidate if the PTE was already hashed,
+	 * which synchronizes us with any concurrent invalidation.
+	 * In the percpu case, we also fall back to the simple update, preserving
+	 * the hash bits
+	 */
+	if (percpu) {
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		return;
+	}
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+	/* Third case is 32-bit hash table in UP mode, we need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
+	 * so we need to keep track that this PTE needs invalidating
+	 */
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+#error "Not supported "
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	return pgprot_noncached_wc(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 0000000..068085b
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT      (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
new file mode 100644
index 0000000..9a37986
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+/*
+ * Entries per page directory level.  The PTE level must use a 64b record
+ * for each page table entry.  The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define H_PTE_INDEX_SIZE  9
+#define H_PMD_INDEX_SIZE  7
+#define H_PUD_INDEX_SIZE  9
+#define H_PGD_INDEX_SIZE  9
+
+/*
+ * Each context is 512TB, but on 4k we restrict our max TASK size to 64TB.
+ * Hence we also limit the max EA to 64TB (46 bits).
+ */
+#define MAX_EA_BITS_PER_CONTEXT		46
+
+#ifndef __ASSEMBLY__
+#define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
+#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << H_PMD_INDEX_SIZE)
+#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << H_PUD_INDEX_SIZE)
+#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << H_PGD_INDEX_SIZE)
+
+#define H_PAGE_F_GIX_SHIFT	53
+#define H_PAGE_F_SECOND	_RPAGE_RPN44	/* HPTE is in 2ndary HPTEG */
+#define H_PAGE_F_GIX	(_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
+#define H_PAGE_BUSY	_RPAGE_RSV1     /* software: PTE & hash are busy */
+#define H_PAGE_HASHPTE	_RPAGE_RSV2     /* software: PTE & hash are busy */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
+			 H_PAGE_F_SECOND | H_PAGE_F_GIX)
+/*
+ * Not supported by 4k linux page size
+ */
+#define H_PAGE_4K_PFN	0x0
+#define H_PAGE_THP_HUGE 0x0
+#define H_PAGE_COMBO	0x0
+
+/* 8 bytes per each pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT  (H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR	(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3)
+#define H_PMD_FRAG_NR	(PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
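+
+/*
+ * Worked example (illustrative): with H_PTE_INDEX_SIZE == 9,
+ * H_PTE_FRAG_SIZE_SHIFT == 12, so a PTE fragment is a whole 4K page and
+ * H_PTE_FRAG_NR == 1; with H_PMD_INDEX_SIZE == 7 a PMD fragment is 1K and
+ * H_PMD_FRAG_NR == 4K >> 10 == 4.
+ */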
+
+/* memory key bits, only 8 keys supported */
+#define H_PTE_PKEY_BIT0	0
+#define H_PTE_PKEY_BIT1	0
+#define H_PTE_PKEY_BIT2	_RPAGE_RSV3
+#define H_PTE_PKEY_BIT3	_RPAGE_RSV4
+#define H_PTE_PKEY_BIT4	_RPAGE_RSV5
+
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
+ */
+#define remap_4k_pfn(vma, addr, pfn, prot)	\
+	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hash__hugepd_ok(hugepd_t hpd)
+{
+	unsigned long hpdval = hpd_val(hpd);
+	/*
+	 * if it is not a pte and has the hugepd shift mask
+	 * set, then it is a hugepd directory pointer
+	 */
+	if (!(hpdval & _PAGE_PTE) &&
+	    ((hpdval & HUGEPD_SHIFT_MASK) != 0))
+		return true;
+	return false;
+}
+#endif
+
+/*
+ * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
+ * a matter of returning the PTE bits that need to be modified. On 64K PTE,
+ * things are a little more involved and hence need many more parameters to
+ * accomplish the same. However we want to abstract this out from the caller by
+ * keeping the prototype consistent across the two formats.
+ */
+static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
+					 unsigned int subpg_index, unsigned long hidx,
+					 int offset)
+{
+	return (hidx << H_PAGE_F_GIX_SHIFT) &
+		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+	BUG();
+	return NULL;
+}
+
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+	BUG();
+	return 0;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+					   int index)
+{
+	BUG();
+	return 0;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+					unsigned int index, unsigned int hidx)
+{
+	BUG();
+}
+
+static inline int hash__pmd_trans_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	BUG();
+	return 0;
+}
+
+static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
+{
+	BUG();
+	return pmd;
+}
+
+extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
+					   unsigned long addr, pmd_t *pmdp,
+					   unsigned long clr, unsigned long set);
+extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp);
+extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+					 pgtable_t pgtable);
+extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pmd_t *pmdp);
+extern int hash__has_transparent_hugepage(void);
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
new file mode 100644
index 0000000..f82ee8a
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+
+#define H_PTE_INDEX_SIZE  8
+#define H_PMD_INDEX_SIZE  10
+#define H_PUD_INDEX_SIZE  10
+#define H_PGD_INDEX_SIZE  8
+
+/*
+ * Each context is 512TB in size. An SLB miss for the first/default context
+ * is handled in the hot path.
+ */
+#define MAX_EA_BITS_PER_CONTEXT		49
+
+/*
+ * A 64k-aligned address frees up a few of the lower bits of the RPN for us;
+ * we steal those here. For more details look at pte_pfn/pfn_pte()
+ */
+#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
+#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
+#define H_PAGE_BUSY	_RPAGE_RPN44     /* software: PTE & hash are busy */
+#define H_PAGE_HASHPTE	_RPAGE_RPN43	/* PTE has associated HPTE */
+
+/* memory key bits. */
+#define H_PTE_PKEY_BIT0	_RPAGE_RSV1
+#define H_PTE_PKEY_BIT1	_RPAGE_RSV2
+#define H_PTE_PKEY_BIT2	_RPAGE_RSV3
+#define H_PTE_PKEY_BIT3	_RPAGE_RSV4
+#define H_PTE_PKEY_BIT4	_RPAGE_RSV5
+
+/*
+ * We need to differentiate between an explicit huge page and a THP huge
+ * page, since a THP huge page also needs to track real subpage details
+ */
+#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ * 8 bytes per each pte entry and another 8 bytes for storing
+ * slot details.
+ */
+#define H_PTE_FRAG_SIZE_SHIFT  (H_PTE_INDEX_SIZE + 3 + 1)
+#define H_PTE_FRAG_NR	(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
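+
+/*
+ * Worked example (illustrative): H_PTE_FRAG_SIZE_SHIFT == 8 + 3 + 1 == 12,
+ * so each fragment is 4K (2K of PTEs plus 2K of hidx slots) and a 64K page
+ * holds H_PTE_FRAG_NR == 16 fragments.
+ */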
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3 + 1)
+#else
+#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3)
+#endif
+#define H_PMD_FRAG_NR	(PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
+
+#ifndef __ASSEMBLY__
+#include <asm/errno.h>
+
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
+{
+	real_pte_t rpte;
+	unsigned long *hidxp;
+
+	rpte.pte = pte;
+
+	/*
+	 * Ensure that we do not read the hidx before we read the PTE, because
+	 * the writer side is expected to finish writing the hidx first, followed
+	 * by the PTE, using smp_wmb(); pte_set_hidx() below ensures that.
+	 */
+	smp_rmb();
+
+	hidxp = (unsigned long *)(ptep + offset);
+	rpte.hidx = *hidxp;
+	return rpte;
+}
+
+/*
+ * shift the hidx representation by one-modulo-0xf; i.e. hidx 0 is represented
+ * as 1, 1 as 2, ..., and 0xf as 0.  This convention lets us represent an
+ * invalid hidx 0xf with a 0x0 bit value. PTEs are anyway zeroed when
+ * allocated; we don't have to zero them again, thus saving on initialization.
+ */
+#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
+#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL)   /* shift forward by one */
+#define HIDX_BITS(x, index)  (x << (index << 2))
+#define BITS_TO_HIDX(x, index)  ((x >> (index << 2)) & 0xfUL)
+#define INVALID_RPTE_HIDX  0x0UL
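+
+/*
+ * Worked example (illustrative): HIDX_SHIFT_BY_ONE(0xf) == (0xf + 1) & 0xf
+ * == 0x0, so the invalid hidx 0xf is stored as INVALID_RPTE_HIDX, and
+ * HIDX_UNSHIFT_BY_ONE(0x0) == (0x0 + 0xf) & 0xf == 0xf recovers it.
+ */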
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
+}
+
+/*
+ * Commit the hidx and return the PTE bits that need to be modified. The caller is
+ * expected to modify the PTE bits accordingly and commit the PTE to memory.
+ */
+static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
+{
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);
+
+	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
+	*hidxp = rpte.hidx  | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
+
+	/*
+	 * Anyone reading PTE must ensure hidx bits are read after reading the
+	 * PTE by using the read-side barrier smp_rmb(). __real_pte() can be
+	 * used for that.
+	 */
+	smp_wmb();
+
+	/* No PTE bits to be modified, return 0x0UL */
+	return 0x0UL;
+}
+
+#define __rpte_to_pte(r)	((r).pte)
+extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
+/*
+ * Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well, since we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
+	do {								\
+		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
+		unsigned __split = (psize == MMU_PAGE_4K ||		\
+				    psize == MMU_PAGE_64K_AP);		\
+		shift = mmu_psize_defs[psize].shift;			\
+		for (index = 0; vpn < __end; index++,			\
+			     vpn += (1L << (shift - VPN_SHIFT))) {	\
+		if (!__split || __rpte_sub_valid(rpte, index))
+
+#define pte_iterate_hashed_end()  } } while(0)
+
+#define pte_pagesize_index(mm, addr, pte)	\
+	(((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+			   unsigned long pfn, unsigned long size, pgprot_t);
+static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+				 unsigned long pfn, pgprot_t prot)
+{
+	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
+		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
+		return -EINVAL;
+	}
+	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
+			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
+}
+
+#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
+#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
+				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
+#else
+#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
+				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
+#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
+#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+	/*
+	 * The hpte hidx is stored in the pgtable whose address is in the
+	 * second half of the PMD
+	 *
+	 * Order this load with the test for pmd_trans_huge in the caller
+	 */
+	smp_rmb();
+	return *(char **)(pmdp + PTRS_PER_PMD);
+}
+/*
+ * The linux hugepage PMD now include the pmd entries followed by the address
+ * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
+ * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid]. We use one byte per
+ * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and
+ * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
+ *
+ * The top three bits are intentionally left as zero. These memory locations
+ * are also used as normal page PTE pointers. So if we have any pointers
+ * left around while we collapse a hugepage, we need to make sure the
+ * _PAGE_PRESENT bit of those is zero when we look at them.
+ */
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+	return hpte_slot_array[index] & 0x1;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+					   int index)
+{
+	return hpte_slot_array[index] >> 1;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+					unsigned int index, unsigned int hidx)
+{
+	hpte_slot_array[index] = (hidx << 1) | 0x1;
+}
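+
+/*
+ * Worked example (illustrative): mark_hpte_slot_valid(arr, i, 5) stores
+ * (5 << 1) | 0x1 == 0x0b, after which hpte_valid(arr, i) reads bit 0 as 1
+ * and hpte_hash_index(arr, i) recovers 0x0b >> 1 == 5.
+ */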
+
+/*
+ *
+ * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs
+ * page. The hugetlbfs page table walking and mangling paths are totally
+ * separated from the core VM paths and they're differentiated by
+ *  VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build
+ * time in that case.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ *
+ */
+static inline int hash__pmd_trans_huge(pmd_t pmd)
+{
+	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
+		  (_PAGE_PTE | H_PAGE_THP_HUGE));
+}
+
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
+static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
+}
+
+extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
+					   unsigned long addr, pmd_t *pmdp,
+					   unsigned long clr, unsigned long set);
+extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp);
+extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+					 pgtable_t pgtable);
+extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pmd_t *pmdp);
+extern int hash__has_transparent_hugepage(void);
+#endif /*  CONFIG_TRANSPARENT_HUGEPAGE */
+#endif	/* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
new file mode 100644
index 0000000..d52a51b
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_H
+#ifdef __KERNEL__
+
+#include <asm/asm-const.h>
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * Additional bits may be defined in pgtable-hash64-*.h
+ *
+ */
+#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/hash-64k.h>
+#else
+#include <asm/book3s/64/hash-4k.h>
+#endif
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
+				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
+#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
+
+/*
+ * We store the slot details in the second half of page table.
+ * Increase the pud level table so that hugetlb ptes can be stored
+ * at pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) &&  defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
+#endif
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#define H_KERN_VIRT_SIZE  ASM_CONST(0x0000400000000000) /* 64T */
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define H_VMALLOC_START	H_KERN_VIRT_START
+#define H_VMALLOC_SIZE	ASM_CONST(0x380000000000) /* 56T */
+#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)
+
+#define H_KERN_IO_START	H_VMALLOC_END
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT		60UL
+#define REGION_MASK		(0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
+#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
+#define USER_REGION_ID		(0UL)
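+
+/*
+ * Worked example (illustrative): REGION_ID(H_VMALLOC_START) ==
+ * 0xD000000000000000 >> 60 == 0xd, so VMALLOC_REGION_ID is 0xd, while
+ * VMEMMAP_REGION_ID is pinned at 0xf and user addresses resolve to 0x0.
+ */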
+
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs.
+ */
+#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
+
+#ifdef CONFIG_PPC_MM_SLICES
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif /* CONFIG_PPC_MM_SLICES */
+
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY	0x8
+#define _PTEIDX_GROUP_IX	0x7
+
+#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
+#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
+
+#ifndef __ASSEMBLY__
+#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
+#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
+static inline int hash__pgd_bad(pgd_t pgd)
+{
+	return (pgd_val(pgd) == 0);
+}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
+#endif
+
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
+/* Atomic PTE updates */
+static inline unsigned long hash__pte_update(struct mm_struct *mm,
+					 unsigned long addr,
+					 pte_t *ptep, unsigned long clr,
+					 unsigned long set,
+					 int huge)
+{
+	__be64 old_be, tmp_be;
+	unsigned long old;
+
+	__asm__ __volatile__(
+	"1:	ldarx	%0,0,%3		# pte_update\n\
+	and.	%1,%0,%6\n\
+	bne-	1b \n\
+	andc	%1,%0,%4 \n\
+	or	%1,%1,%7\n\
+	stdcx.	%1,0,%3 \n\
+	bne-	1b"
+	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
+	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
+	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
+	: "cc" );
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
+	old = be64_to_cpu(old_be);
+	if (old & H_PAGE_HASHPTE)
+		hpte_need_flush(mm, addr, ptep, old, huge);
+
+	return old;
+}
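+
+/*
+ * Illustrative note (not in the original): unlike the simpler 32-bit
+ * pte_update(), the ldarx/stdcx. loop above first spins while H_PAGE_BUSY
+ * is set (the "and./bne- 1b" pair), so a PTE that another CPU is busy
+ * hashing is never modified mid-update; the (old & ~clr) | set computation
+ * itself is the same read-modify-write pattern.
+ */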
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE, this
+ * function doesn't need to flush the hash entry
+ */
+static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+	__be64 old, tmp, val, mask;
+
+	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
+			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);
+
+	val = pte_raw(entry) & mask;
+
+	__asm__ __volatile__(
+	"1:	ldarx	%0,0,%4\n\
+		and.	%1,%0,%6\n\
+		bne-	1b \n\
+		or	%0,%3,%0\n\
+		stdcx.	%0,0,%4\n\
+		bne-	1b"
+	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
+	:"cc");
+}
+
+static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
+static inline int hash__pte_none(pte_t pte)
+{
+	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
+}
+
+unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
+		int ssize, real_pte_t rpte, unsigned int subpg_index);
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
+				  pte_t *ptep, pte_t pte, int percpu)
+{
+	/*
+	 * Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
+	 */
+	*ptep = pte;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+				   pmd_t *pmdp, unsigned long old_pmd);
+#else
+static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
+					  unsigned long addr, pmd_t *pmdp,
+					  unsigned long old_pmd)
+{
+	WARN(1, "%s called with THP disabled\n", __func__);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+
+extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
+			     unsigned long flags);
+extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
+					      unsigned long page_size,
+					      unsigned long phys);
+extern void hash__vmemmap_remove_mapping(unsigned long start,
+				     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
new file mode 100644
index 0000000..5088838
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+/*
+ * For radix we want generic code to handle hugetlb. But then if we want
+ * both hash and radix to be enabled together we need to workaround the
+ * limitations.
+ */
+void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern unsigned long
+radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+				unsigned long len, unsigned long pgoff,
+				unsigned long flags);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+	unsigned long shift;
+
+	shift = huge_page_shift(hstate);
+	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+		return MMU_PAGE_2M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+		return MMU_PAGE_1G;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+		return MMU_PAGE_16M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+		return MMU_PAGE_16G;
+	else {
+		WARN(1, "Wrong huge page shift\n");
+		return mmu_virtual_psize;
+	}
+}
+
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void)
+{
+	return true;
+}
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
new file mode 100644
index 0000000..b3520b5
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -0,0 +1,767 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
+/*
+ * PowerPC64 memory management structures
+ *
+ * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
+ *   PPC64 rework.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/page.h>
+#include <asm/bug.h>
+#include <asm/asm-const.h>
+
+/*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices-related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/book3s/64/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * SLB
+ */
+
+#define SLB_NUM_BOLTED		3
+#define SLB_CACHE_ENTRIES	8
+#define SLB_MIN_SIZE		32
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT		12
+#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
+#define SLB_VSID_SHIFT_1T	24
+#define SLB_VSID_SSIZE_SHIFT	62
+#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
+#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
+#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
+#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
+#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
+#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
+#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
+#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
+#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
+#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
+#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
+#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)
+
+#define SLB_VSID_KERNEL		(SLB_VSID_KP)
+#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C			(0x08000000)
+#define SLBIE_SSIZE_SHIFT	25
+
+/*
+ * Hash table
+ */
+
+#define HPTES_PER_GROUP 8
+
+#define HPTE_V_SSIZE_SHIFT	62
+#define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
+#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
+#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
+#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
+
+/*
+ * ISA 3.0 has a different HPTE format.
+ */
+#define HPTE_R_3_0_SSIZE_SHIFT	58
+#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
+#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
+#define HPTE_R_KEY_BIT0		ASM_CONST(0x2000000000000000)
+#define HPTE_R_KEY_BIT1		ASM_CONST(0x1000000000000000)
+#define HPTE_R_RPN_SHIFT	12
+#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
+#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
+#define HPTE_R_N		ASM_CONST(0x0000000000000004)
+#define HPTE_R_G		ASM_CONST(0x0000000000000008)
+#define HPTE_R_M		ASM_CONST(0x0000000000000010)
+#define HPTE_R_I		ASM_CONST(0x0000000000000020)
+#define HPTE_R_W		ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
+#define HPTE_R_C		ASM_CONST(0x0000000000000080)
+#define HPTE_R_R		ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
+#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
+#define HPTE_R_KEY_BIT3		ASM_CONST(0x0000000000000400)
+#define HPTE_R_KEY_BIT4		ASM_CONST(0x0000000000000200)
+#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)
+
+#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
+#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX	0	/* Supervisor read/write, User none */
+#define PP_RWRX 1	/* Supervisor read/write, User read */
+#define PP_RWRW 2	/* Supervisor read/write, User read/write */
+#define PP_RXRX 3	/* Supervisor read,       User read */
+#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */
+
+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
+#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
+#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
+#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT	12
+
+#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
+#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
+#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
+#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */
+
+#ifndef __ASSEMBLY__
+
+struct mmu_hash_ops {
+	void            (*hpte_invalidate)(unsigned long slot,
+					   unsigned long vpn,
+					   int bpsize, int apsize,
+					   int ssize, int local);
+	long		(*hpte_updatepp)(unsigned long slot,
+					 unsigned long newpp,
+					 unsigned long vpn,
+					 int bpsize, int apsize,
+					 int ssize, unsigned long flags);
+	void            (*hpte_updateboltedpp)(unsigned long newpp,
+					       unsigned long ea,
+					       int psize, int ssize);
+	long		(*hpte_insert)(unsigned long hpte_group,
+				       unsigned long vpn,
+				       unsigned long prpn,
+				       unsigned long rflags,
+				       unsigned long vflags,
+				       int psize, int apsize,
+				       int ssize);
+	long		(*hpte_remove)(unsigned long hpte_group);
+	int             (*hpte_removebolted)(unsigned long ea,
+					     int psize, int ssize);
+	void		(*flush_hash_range)(unsigned long number, int local);
+	void		(*hugepage_invalidate)(unsigned long vsid,
+					       unsigned long addr,
+					       unsigned char *hpte_slot_array,
+					       int psize, int ssize, int local);
+	int		(*resize_hpt)(unsigned long shift);
+	/*
+	 * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken, so concurrent access on pre-POWER5 hardware could result
+	 * in a deadlock.
+	 * The linear mapping is destroyed as well.
+	 */
+	void		(*hpte_clear_all)(void);
+};
+extern struct mmu_hash_ops mmu_hash_ops;
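+
+/*
+ * Illustrative sketch (editor's addition): hash MMU callers dispatch
+ * through this ops table, which the native or hypervisor (pSeries) setup
+ * code fills in at boot. The argument values below are hypothetical.
+ */
+#if 0	/* example only */
+	mmu_hash_ops.hpte_invalidate(slot, vpn, MMU_PAGE_4K, MMU_PAGE_4K,
+				     MMU_SEGSIZE_256M, 0);
+#endif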
+
+struct hash_pte {
+	__be64 v;
+	__be64 r;
+};
+
+extern struct hash_pte *htab_address;
+extern unsigned long htab_size_bytes;
+extern unsigned long htab_hash_mask;
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
+static inline unsigned long get_sllp_encoding(int psize)
+{
+	unsigned long sllp;
+
+	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
+		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
+	return sllp;
+}
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Segment sizes.
+ * These are the values used by hardware in the B field of
+ * SLB entries and the first dword of MMU hashtable entries.
+ * The B field is 2 bits; the values 2 and 3 are unused and reserved.
+ */
+#define MMU_SEGSIZE_256M	0
+#define MMU_SEGSIZE_1T		1
+
+/*
+ * Encoded page number shift.
+ * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
+ * 12 bits. This enables us to address up to a 76-bit VA.
+ * For the HPT hash from a VA we can ignore the page size bits of the VA,
+ * and for HPTE encoding we ignore up to 23 bits of the VA. So ignoring the
+ * lower 12 bits ensures we work in all cases, including 4k page size.
+ */
+#define VPN_SHIFT	12
+
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+static inline int slb_vsid_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SLB_VSID_SHIFT;
+	return SLB_VSID_SHIFT_1T;
+}
+
+static inline int segment_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SID_SHIFT;
+	return SID_SHIFT_1T;
+}
+
+/*
+ * This array is indexed by the LP field of the HPTE second dword.
+ * Since this field may contain some RPN bits, some entries are
+ * replicated so that we get the same value irrespective of RPN.
+ * The top 4 bits are the page size index (MMU_PAGE_*) for the
+ * actual page size, the bottom 4 bits are the base page size.
+ */
+extern u8 hpte_page_sizes[1 << LP_BITS];
+
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
+{
+	unsigned int i, lp;
+
+	if (!(h & HPTE_V_LARGE))
+		return 1ul << 12;
+
+	/* Look at the 8 bit LP value */
+	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+	i = hpte_page_sizes[lp];
+	if (!i)
+		return 0;
+	if (!is_base_size)
+		i >>= 4;
+	return 1ul << mmu_psize_defs[i & 0xf].shift;
+}
+
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
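+
+/*
+ * Illustrative sketch (editor's addition): decoding an HPTE read from the
+ * hash table. For a 64K-base/64K-actual entry both helpers return 0x10000.
+ */
+#if 0	/* example only */
+	unsigned long v = be64_to_cpu(hptep->v);
+	unsigned long r = be64_to_cpu(hptep->r);
+	unsigned long actual = hpte_page_size(v, r);
+	unsigned long base = hpte_base_page_size(v, r);
+#endif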
+
+/*
+ * The current system page and segment sizes
+ */
+extern int mmu_kernel_ssize;
+extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
+extern unsigned long tce_alloc_start, tce_alloc_end;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
+
+/*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE.  The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+					     int ssize)
+{
+	unsigned long v;
+	/*
+	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
+	 * These bits are not needed in the PTE, because the
+	 * low-order b of these bits are part of the byte offset
+	 * into the virtual page and, if b < 23, the high-order
+	 * 23-b of these bits are always used in selecting the
+	 * PTEGs to be searched.
+	 */
+	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	return v;
+}
+
+/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+	/* trim AVPN, drop B */
+	return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+	/* move B field from 1st to 2nd dword, trim ARPN */
+	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+	/* insert B field */
+	return (v & HPTE_V_COMMON_BITS) |
+		((r & HPTE_R_3_0_SSIZE_MASK) <<
+		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+	/* clear out B field */
+	return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
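+
+/*
+ * Illustrative sketch (editor's addition): when inserting an old-format
+ * HPTE on ISA 3.0 hardware, convert r before v so the B field is taken
+ * from the still-unconverted first dword.
+ */
+#if 0	/* example only; hpte_v/hpte_r are hypothetical locals */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
+		hpte_v = hpte_old_to_new_v(hpte_v);
+	}
+#endif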
+
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+	unsigned long hpte_v;
+
+	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	return hpte_v;
+}
+
+/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * using the base page size and actual page size.
+ */
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+					  int actual_psize, int ssize)
+{
+	unsigned long v;
+	v = hpte_encode_avpn(vpn, base_psize, ssize);
+	if (actual_psize != MMU_PAGE_4K)
+		v |= HPTE_V_LARGE;
+	return v;
+}
+
+/*
+ * This function sets the ARPN and LP fields of the HPTE appropriately
+ * for the page size. We assume the pa is already "clean", that is, properly
+ * aligned for the requested page size.
+ */
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+					  int actual_psize)
+{
+	/* A 4K page needs no special encoding */
+	if (actual_psize == MMU_PAGE_4K)
+		return pa & HPTE_R_RPN;
+	else {
+		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+		unsigned int shift = mmu_psize_defs[actual_psize].shift;
+		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
+	}
+}
+
+/*
+ * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
+ */
+static inline unsigned long hpt_vpn(unsigned long ea,
+				    unsigned long vsid, int ssize)
+{
+	unsigned long mask;
+	int s_shift = segment_shift(ssize);
+
+	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
+	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
+}
+
+/*
+ * This hashes a virtual address
+ */
+static inline unsigned long hpt_hash(unsigned long vpn,
+				     unsigned int shift, int ssize)
+{
+	unsigned long mask;
+	unsigned long hash, vsid;
+
+	/* VPN_SHIFT can be at most 12 */
+	if (ssize == MMU_SEGSIZE_256M) {
+		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
+		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
+			((vpn & mask) >> (shift - VPN_SHIFT));
+	} else {
+		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
+		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
+		hash = vsid ^ (vsid << 25) ^
+			((vpn & mask) >> (shift - VPN_SHIFT)) ;
+	}
+	return hash & 0x7fffffffffUL;
+}
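+
+/*
+ * Illustrative sketch (editor's addition): hpt_hash() is typically masked
+ * with htab_hash_mask and scaled by HPTES_PER_GROUP (assumed defined
+ * earlier in this header) to locate the primary PTE group.
+ */
+#if 0	/* example only */
+	unsigned long hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+	unsigned long hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+#endif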
+
+#define HPTE_LOCAL_UPDATE	0x1
+#define HPTE_NOHPTE_UPDATE	0x2
+
+extern int __hash_page_4K(unsigned long ea, unsigned long access,
+			  unsigned long vsid, pte_t *ptep, unsigned long trap,
+			  unsigned long flags, int ssize, int subpage_prot);
+extern int __hash_page_64K(unsigned long ea, unsigned long access,
+			   unsigned long vsid, pte_t *ptep, unsigned long trap,
+			   unsigned long flags, int ssize);
+struct mm_struct;
+unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
+extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+			unsigned long access, unsigned long trap,
+			unsigned long flags);
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+		     unsigned long dsisr);
+int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+		     pte_t *ptep, unsigned long trap, unsigned long flags,
+		     int ssize, unsigned int shift, unsigned int mmu_psize);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __hash_page_thp(unsigned long ea, unsigned long access,
+			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
+			   unsigned long flags, int ssize, unsigned int psize);
+#else
+static inline int __hash_page_thp(unsigned long ea, unsigned long access,
+				  unsigned long vsid, pmd_t *pmdp,
+				  unsigned long trap, unsigned long flags,
+				  int ssize, unsigned int psize)
+{
+	BUG();
+	return -1;
+}
+#endif
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+			       unsigned long vsid, unsigned long trap,
+			       int ssize, int psize, int lpsize,
+			       unsigned long pte);
+extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+			     unsigned long pstart, unsigned long prot,
+			     int psize, int ssize);
+int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+			int psize, int ssize);
+extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
+extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+
+#ifdef CONFIG_PPC_PSERIES
+void hpte_init_pseries(void);
+#else
+static inline void hpte_init_pseries(void) { }
+#endif
+
+extern void hpte_init_native(void);
+
+extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
+
+extern void slb_vmalloc_update(void);
+extern void slb_set_size(u16 size);
+#endif /* __ASSEMBLY__ */
+
+/*
+ * VSID allocation (256MB segment)
+ *
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
+ *
+ * For user processes max context id is limited to MAX_USER_CONTEXT.
+ *
+ * For kernel space, we use context ids 1-4 to map addresses as below:
+ * NOTE: each context only supports 64TB for now.
+ * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ * VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
+ *
+ * We use VSID 0 to indicate an invalid VSID. This means we can't use context id
+ * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
+ * will produce a VSID of 0.
+ *
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble.
+ */
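+
+/*
+ * Illustrative sketch (editor's addition): the "% VSID_MODULUS" above is
+ * cheap because the modulus is 2^n - 1, so x mod (2^n - 1) reduces by
+ * folding x into (x >> n) + (x & (2^n - 1)) and absorbing the final carry.
+ * vsid_scramble() further below implements exactly this.
+ */
+#if 0	/* example only */
+static inline unsigned long example_mod_2n_minus_1(unsigned long x, int n)
+{
+	unsigned long m = (1UL << n) - 1;
+
+	x = (x >> n) + (x & m);			/* first fold */
+	return (x + ((x + 1) >> n)) & m;	/* absorb carry, map m -> 0 */
+}
+#endif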
+
+/*
+ * The maximum VA bits we support as of now is 68. We want a 19-bit
+ * context ID.
+ * Restrictions:
+ * The GPU cannot access beyond 128TB (47-bit effective address). We
+ * also cannot do more than a 20-bit PID.
+ * For P4 and P5, which can only do a 65-bit VA, we restrict CONTEXT_BITS
+ * to 16 bits (i.e., we can only have 2^16 pids at the same time).
+ */
+#define VA_BITS			68
+#define CONTEXT_BITS		19
+#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
+#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
+
+#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
+#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
+ * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^49 bytes (512TB).
+ *
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble.
+ */
+#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
+#define MIN_USER_CONTEXT	(5)
+
+/* Would be nice to use KERNEL_REGION_ID here */
+#define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)
+
+/*
+ * For platforms that support only a 65-bit VA we limit the context bits.
+ */
+#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
+
+/*
+ * This should be computed such that protovsid * vsid_multiplier
+ * doesn't overflow 64 bits. The vsid_multiplier should also be
+ * co-prime to vsid_modulus. We also need to make sure that the number
+ * of bits in the multiplied result (dividend) is less than twice the
+ * number of protovsid bits for our modulus optimization to work.
+ *
+ * The below table shows the current values used.
+ * |-------+------------+----------------------+------------+--------------------|
+ * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 1T    |         24 |                   25 |         49 |                 50 |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 256MB |         24 |                   37 |         61 |                 74 |
+ * |-------+------------+----------------------+------------+--------------------|
+ *
+ * |-------+------------+----------------------+------------+--------------------|
+ * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 1T    |         24 |                   28 |         52 |                 56 |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 256MB |         24 |                   40 |         64 |                 80 |
+ * |-------+------------+----------------------+------------+--------------------|
+ *
+ */
+#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
+#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
+#define VSID_BITS_65_256M	(65 - SID_SHIFT)
+/*
+ * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
+ */
+#define VSID_MULINV_256M	ASM_CONST(665548017062)
+
+#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
+#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
+#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
+#define VSID_MULINV_1T		ASM_CONST(209034062)
+
+/* 1TB VSID reserved for VRMA */
+#define VRMA_VSID	0x1ffffffUL
+#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
+
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
+#define TASK_SLICE_ARRAY_SZ(x)	((x)->context.slb_addr_limit >> 41)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these.  Basically we have a 3-level tree, with the top level being
+ * the protptrs array.  To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k).  For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+	unsigned long maxaddr;	/* only addresses < this are protected */
+	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
+	unsigned int *low_prot[4];
+};
+
+#define SBP_L1_BITS		(PAGE_SHIFT - 2)
+#define SBP_L2_BITS		(PAGE_SHIFT - 3)
+#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
+#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
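+
+/*
+ * Illustrative sketch (editor's addition): walking the 3-level tree
+ * described above for an address >= 4GB (below 4GB the low_prot pages are
+ * used instead). spt and addr are hypothetical locals; NULL checks omitted.
+ */
+#if 0	/* example only */
+	unsigned int **sbpm = spt->protptrs[addr >> SBP_L3_SHIFT];
+	unsigned int *sbpp = sbpm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
+	unsigned int spp = sbpp[(addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
+#endif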
+
+extern void subpage_prot_free(struct mm_struct *mm);
+extern void subpage_prot_init_new_context(struct mm_struct *mm);
+#else
+static inline void subpage_prot_free(struct mm_struct *mm) {}
+static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+
+#if 0
+/*
+ * The code below is equivalent to this function for arguments
+ * < 2^VSID_BITS, which is all this should ever be called
+ * with.  However gcc is not clever enough to compute the
+ * modulus (2^n-1) without a second multiply.
+ */
+#define vsid_scramble(protovsid, size) \
+	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
+
+/* simplified form avoiding mod operation */
+#define vsid_scramble(protovsid, size) \
+	({								 \
+		unsigned long x;					 \
+		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
+		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
+		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
+	})
+
+#else /* 1 */
+static inline unsigned long vsid_scramble(unsigned long protovsid,
+				  unsigned long vsid_multiplier, int vsid_bits)
+{
+	unsigned long vsid;
+	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
+	/*
+	 * We use the same multiplier for both 256M and 1T segments now.
+	 */
+	vsid = protovsid * vsid_multiplier;
+	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
+	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
+}
+
+#endif /* 1 */
+
+/* Returns the segment size indicator for a user address */
+static inline int user_segment_size(unsigned long addr)
+{
+	/* Use 1T segments if possible for addresses >= 1T */
+	if (addr >= (1UL << SID_SHIFT_1T))
+		return mmu_highuser_ssize;
+	return MMU_SEGSIZE_256M;
+}
+
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
+				     int ssize)
+{
+	unsigned long va_bits = VA_BITS;
+	unsigned long vsid_bits;
+	unsigned long protovsid;
+
+	/*
+	 * Bad address. We return VSID 0 for that
+	 */
+	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
+		return 0;
+
+	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+		va_bits = 65;
+
+	if (ssize == MMU_SEGSIZE_256M) {
+		vsid_bits = va_bits - SID_SHIFT;
+		protovsid = (context << ESID_BITS) |
+			((ea >> SID_SHIFT) & ESID_BITS_MASK);
+		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
+	}
+	/* 1T segment */
+	vsid_bits = va_bits - SID_SHIFT_1T;
+	protovsid = (context << ESID_BITS_1T) |
+		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
+	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
+}
+
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+	unsigned long context;
+
+	if (!is_kernel_addr(ea))
+		return 0;
+
+	/*
+	 * For kernel space, we use context ids 1-4 to map the address space as
+	 * below:
+	 *
+	 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+	 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+	 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+	 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+	 *
+	 * So we can compute the context from the region (top nibble) by
+	 * subtracting 11, or 0xc - 1.
+	 */
+	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;
+
+	return get_vsid(context, ea, ssize);
+}
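+
+/*
+ * Illustrative sketch (editor's addition): an address at the start of the
+ * linear mapping, ea = 0xc000000000000000, yields context 1 and hence the
+ * VSID of the first kernel segment.
+ */
+#if 0	/* example only */
+	vsid = get_kernel_vsid(0xc000000000000000UL, mmu_kernel_ssize);
+#endif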
+
+unsigned htab_shift_for_mem_size(unsigned long mem_size);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
new file mode 100644
index 0000000..9c8c669
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_H_
+
+#ifndef __ASSEMBLY__
+/*
+ * Page size definition
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
+ *            directly to a slbmte "vsid" value
+ *    penc  : is the HPTE encoding mask for the "LP" field
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
+	unsigned int	tlbiel;	/* tlbiel supported for that page size */
+	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
+	union {
+		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
+		unsigned long ap;	/* Ap encoding used by PowerISA 3.0 */
+	};
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+#endif /* __ASSEMBLY__ */
+
+/* 64-bit classic hash table MMU */
+#include <asm/book3s/64/mmu-hash.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * ISA 3.0 partition and process table entry format
+ */
+struct prtb_entry {
+	__be64 prtb0;
+	__be64 prtb1;
+};
+extern struct prtb_entry *process_tb;
+
+struct patb_entry {
+	__be64 patb0;
+	__be64 patb1;
+};
+extern struct patb_entry *partition_tb;
+
+/* Bits in patb0 field */
+#define PATB_HR		(1UL << 63)
+#define RPDB_MASK	0x0fffffffffffff00UL
+#define RPDB_SHIFT	(1UL << 8)
+#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
+#define RTS1_MASK	(3UL << RTS1_SHIFT)
+#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
+#define RTS2_MASK	(7UL << RTS2_SHIFT)
+#define RPDS_MASK	0x1f		/* root page dir. size field */
+
+/* Bits in patb1 field */
+#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
+#define PRTS_MASK	0x1f		/* process table size field */
+#define PRTB_MASK	0x0ffffffffffff000UL
+
+/* Number of supported PID bits */
+extern unsigned int mmu_pid_bits;
+
+/* Base PID to allocate from */
+extern unsigned int mmu_base_pid;
+
+#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
+#define PRTB_ENTRIES	(1ul << mmu_pid_bits)
+
+/*
+ * Power9 currently only supports a 64K partition table size.
+ */
+#define PATB_SIZE_SHIFT	16
+
+typedef unsigned long mm_context_id_t;
+struct spinlock;
+
+/* Maximum possible number of NPUs in a system. */
+#define NV_MAX_NPUS 8
+
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * up to the 4GB range. That gets us 16 low slices. For the rest we track
+ * slices in 1TB units.
+ */
+struct slice_mask {
+	u64 low_slices;
+	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
+
+typedef struct {
+	union {
+		/*
+		 * We use id as the PIDR content for radix. On hash we can use
+		 * more than one id. The extended ids are used when we start
+		 * having addresses above 512TB. We allocate one extended id
+		 * for each 512TB. The new id is then used with the 49-bit
+		 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
+		 * from the EA and the new context ids to build the new VAs.
+		 */
+		mm_context_id_t id;
+		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+	};
+	u16 user_psize;		/* page size index */
+
+	/* Number of bits set in the mm_cpumask */
+	atomic_t active_cpus;
+
+	/* Number of users of the external (Nest) MMU */
+	atomic_t copros;
+
+	/* NPU NMMU context */
+	struct npu_context *npu_context;
+
+#ifdef CONFIG_PPC_MM_SLICES
+	/* SLB page size encodings */
+	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
+	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+	unsigned long slb_addr_limit;
+# ifdef CONFIG_PPC_64K_PAGES
+	struct slice_mask mask_64k;
+# endif
+	struct slice_mask mask_4k;
+# ifdef CONFIG_HUGETLB_PAGE
+	struct slice_mask mask_16m;
+	struct slice_mask mask_16g;
+# endif
+#else
+	u16 sllp;		/* SLB page size encoding */
+#endif
+	unsigned long vdso_base;
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+	struct subpage_prot_table spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+	/*
+	 * pagetable fragment support
+	 */
+	void *pte_frag;
+	void *pmd_frag;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	struct list_head iommu_group_mem_list;
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+	/*
+	 * Each bit represents one protection key.
+	 * bit set   -> key allocated
+	 * bit unset -> key available for allocation
+	 */
+	u32 pkey_allocation_map;
+	s16 execute_only_pkey; /* key holding execute-only protection */
+#endif
+} mm_context_t;
+
+/*
+ * The current system page and segment sizes
+ */
+extern int mmu_linear_psize;
+extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
+extern int mmu_io_psize;
+
+/* MMU initialization */
+void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
+extern void radix_init_native(void);
+extern void hash__early_init_mmu(void);
+extern void radix__early_init_mmu(void);
+static inline void early_init_mmu(void)
+{
+	if (radix_enabled())
+		return radix__early_init_mmu();
+	return hash__early_init_mmu();
+}
+extern void hash__early_init_mmu_secondary(void);
+extern void radix__early_init_mmu_secondary(void);
+static inline void early_init_mmu_secondary(void)
+{
+	if (radix_enabled())
+		return radix__early_init_mmu_secondary();
+	return hash__early_init_mmu_secondary();
+}
+
+extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+					 phys_addr_t first_memblock_size);
+extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+					 phys_addr_t first_memblock_size);
+static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+					      phys_addr_t first_memblock_size)
+{
+	if (early_radix_enabled())
+		return radix__setup_initial_memory_limit(first_memblock_base,
+						   first_memblock_size);
+	return hash__setup_initial_memory_limit(first_memblock_base,
+					   first_memblock_size);
+}
+
+extern int (*register_process_table)(unsigned long base, unsigned long page_size,
+				     unsigned long tbl_size);
+
+#ifdef CONFIG_PPC_PSERIES
+extern void radix_init_pseries(void);
+#else
+static inline void radix_init_pseries(void) { };
+#endif
+
+static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
+{
+	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
+
+	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
+		return ctx->extended_id[index];
+
+	/* should never happen */
+	WARN_ON(1);
+	return 0;
+}
+
+static inline unsigned long get_user_vsid(mm_context_t *ctx,
+					  unsigned long ea, int ssize)
+{
+	unsigned long context = get_ea_context(ctx, ea);
+
+	return get_vsid(context, ea, ssize);
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
new file mode 100644
index 0000000..391ed2c
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -0,0 +1,237 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
+#include <linux/percpu.h>
+
+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+extern struct vmemmap_backing *vmemmap_list;
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
+extern void pte_fragment_free(unsigned long *, int);
+extern void pmd_fragment_free(unsigned long *);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
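+
+/*
+ * Illustrative sketch (editor's addition): since all page tables are
+ * aligned well beyond MAX_PGTABLE_INDEX_SIZE, pgtable_free_tlb() can pack
+ * the index size into the low bits of the table pointer, roughly:
+ */
+#if 0	/* example only */
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;	/* recovered later with & MAX_PGTABLE_INDEX_SIZE */
+	tlb_remove_table(tlb, (void *)pgf);
+#endif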
+
+static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
+#else
+	struct page *page;
+	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
+				4);
+	if (!page)
+		return NULL;
+	return (pgd_t *) page_address(page);
+#endif
+}
+
+static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+	free_page((unsigned long)pgd);
+#else
+	free_pages((unsigned long)pgd, 4);
+#endif
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd;
+
+	if (radix_enabled())
+		return radix__pgd_alloc(mm);
+
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	/*
+	 * Don't scan the PGD for pointers, it contains references to PUDs but
+	 * those references are not full pointers and so can't be recognised by
+	 * kmemleak.
+	 */
+	kmemleak_no_scan(pgd);
+
+	/*
+	 * With hugetlb, we don't clear the second half of the page table.
+	 * If we share the same slab cache with the pmd or pud level table,
+	 * we need to make sure we zero out the full table on alloc.
+	 * With 4K we don't store slot in the second half. Hence we don't
+	 * need to do this for 4k.
+	 */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
+	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
+	memset(pgd, 0, PGD_TABLE_SIZE);
+#endif
+	return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	if (radix_enabled())
+		return radix__pgd_free(mm, pgd);
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	pud_t *pud;
+
+	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	/*
+	 * Tell kmemleak to ignore the PUD, that means don't scan it for
+	 * pointers and don't consider it a leak. PUDs are typically only
+	 * referred to by their PGD, but kmemleak is not able to recognise those
+	 * as pointers, leading to false leak reports.
+	 */
+	kmemleak_ignore(pud);
+
+	return pud;
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+}
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, pud, PUD_INDEX);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return pmd_fragment_alloc(mm, addr);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	pmd_fragment_free((unsigned long *)pmd);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long address)
+{
+	/*
+	 * By now all the pmd entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
+	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				pgtable_t pte_page)
+{
+	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
+}
+
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+	return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return (pte_t *)pte_fragment_alloc(mm, address, 1);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+				      unsigned long address)
+{
+	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	pte_fragment_free((unsigned long *)pte, 1);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pte_fragment_free((unsigned long *)ptepage, 0);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	/*
+	 * By now all the pte entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, table, PTE_INDEX);
+}
+
+#define check_pgt_cache()	do { } while (0)
+
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+	if (IS_ENABLED(CONFIG_PROC_FS))
+		atomic_long_add(count, &direct_pages_count[psize]);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
new file mode 100644
index 0000000..a069dfc
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H
+/*
+ * hash 4k can't share hugetlb and also doesn't support THP
+ */
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int pmd_huge(pmd_t pmd)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	if (radix_enabled())
+		return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+	return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	if (radix_enabled())
+		return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+	return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	if (radix_enabled())
+		return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+	return 0;
+}
+#define pgd_huge pgd_huge
+/*
+ * With radix, we have hugepage ptes in the pud and pmd entries. We don't
+ * need to set up a hugepage directory for them. Our pte and page directory
+ * format enables this.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+	if (radix_enabled())
+		return 0;
+	return hash__hugepd_ok(hpd);
+}
+#define is_hugepd(hpd)		(hugepd_ok(hpd))
+
+/*
+ * 16M and 16G huge page directory tables are allocated from a slab cache.
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+	switch (index) {
+	case H_16M_CACHE_INDEX:
+		return HTLB_16M_INDEX;
+	case H_16G_CACHE_INDEX:
+		return HTLB_16G_INDEX;
+	default:
+		BUG();
+	}
+	/* should not reach */
+}
+
+#else /* !CONFIG_HUGETLB_PAGE */
+static inline int pmd_huge(pmd_t pmd) { return 0; }
+static inline int pud_huge(pud_t pud) { return 0; }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
new file mode 100644
index 0000000..d7ee249
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * a 16GB hugepage pte at PGD and a 16MB hugepage pte at PMD.
+ *
+ * Defined in such a way that we can optimize away code block at build time
+ * if CONFIG_HUGETLB_PAGE=n.
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+static inline int pud_huge(pud_t pud)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+	/*
+	 * leaf pte for huge page
+	 */
+	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+}
+#define pgd_huge pgd_huge
+
+/*
+ * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We
+ * don't need to set up a hugepage directory for them. Our pte and page
+ * directory format enables this.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+	return 0;
+}
+
+#define is_hugepd(pdep)			0
+
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+	BUG();
+}
+
+#else /* !CONFIG_HUGETLB_PAGE */
+static inline int pmd_huge(pmd_t pmd) { return 0; }
+static inline int pud_huge(pud_t pud) { return 0; }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+			       unsigned long pfn, pgprot_t prot)
+{
+	if (radix_enabled())
+		BUG();
+	return hash__remap_4k_pfn(vma, addr, pfn, prot);
+}
+#endif	/* __ASSEMBLY__ */
+#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
new file mode 100644
index 0000000..2a24865
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+
+#include <asm-generic/5level-fixup.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#include <linux/bug.h>
+#endif
+
+/*
+ * Common bits between hash and Radix page table
+ */
+#define _PAGE_BIT_SWAP_TYPE	0
+
+#define _PAGE_NA		0
+#define _PAGE_RO		0
+#define _PAGE_USER		0
+
+#define _PAGE_EXEC		0x00001 /* execute permission */
+#define _PAGE_WRITE		0x00002 /* write access allowed */
+#define _PAGE_READ		0x00004	/* read access allowed */
+#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
+#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
+#define _PAGE_SAO		0x00010 /* Strong access order */
+#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
+#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
+#define _PAGE_DIRTY		0x00080 /* C: page changed */
+#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
+/*
+ * Software bits
+ */
+#define _RPAGE_SW0		0x2000000000000000UL
+#define _RPAGE_SW1		0x00800
+#define _RPAGE_SW2		0x00400
+#define _RPAGE_SW3		0x00200
+#define _RPAGE_RSV1		0x1000000000000000UL
+#define _RPAGE_RSV2		0x0800000000000000UL
+#define _RPAGE_RSV3		0x0400000000000000UL
+#define _RPAGE_RSV4		0x0200000000000000UL
+#define _RPAGE_RSV5		0x00040UL
+
+#define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
+#define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */
+/*
+ * We need to mark a pmd pte invalid while splitting. We can do that by clearing
+ * the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
+ * differentiate between the two we use a software field when invalidating.
+ *
+ * We do the same temporary invalidation for regular pte entries in
+ * ptep_set_access_flags.
+ *
+ * This is used only when _PAGE_PRESENT is cleared.
+ */
+#define _PAGE_INVALID		_RPAGE_SW0
+
+/*
+ * Top and bottom bits of RPN which can be used by hash
+ * translation mode, because we expect them to be zero
+ * otherwise.
+ */
+#define _RPAGE_RPN0		0x01000
+#define _RPAGE_RPN1		0x02000
+#define _RPAGE_RPN44		0x0100000000000000UL
+#define _RPAGE_RPN43		0x0080000000000000UL
+#define _RPAGE_RPN42		0x0040000000000000UL
+#define _RPAGE_RPN41		0x0020000000000000UL
+
+/* Max physical address bit as per radix table */
+#define _RPAGE_PA_MAX		57
+
+/*
+ * Max physical address bit we will use for now.
+ *
+ * This is mostly a hardware limitation and for now Power9 has
+ * a 51 bit limit.
+ *
+ * This is different from the number of physical bit required to address
+ * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
+ * MAX_PHYSMEM_BITS is a linux limitation imposed by the maximum
+ * number of sections we can support (SECTIONS_SHIFT).
+ *
+ * This is different from Radix page table limitation above and
+ * should always be less than that. The limit is done such that
+ * we can overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX
+ * for hash linux page table specific bits.
+ *
+ * In order to be compatible with future hardware generations we keep
+ * some offsets and limit this for now to 53.
+ */
+#define _PAGE_PA_MAX		53
+
+#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
+#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
+#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
+#define __HAVE_ARCH_PTE_DEVMAP
+
+/*
+ * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
+ * Instead of fixing all of them, add an alternate define which
+ * maps to a CI pte mapping.
+ */
+#define _PAGE_NO_CACHE		_PAGE_TOLERANT
+/*
+ * We support _RPAGE_PA_MAX bits of real address in the pte. On the linux
+ * side we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
+ * and everything below PAGE_SHIFT.
+ */
+#define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
+/*
+ * Set of bits not changed in pmd_modify. Even though we have hash-specific
+ * bits in here, on radix we expect them to be zero.
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+/*
+ * user access blocked by key
+ */
+#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO		 (_PAGE_PRIVILEGED | _PAGE_READ)
+#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY |	\
+				 _PAGE_RW | _PAGE_EXEC)
+/*
+ * No page size encoding in the linux PTE
+ */
+#define _PAGE_PSIZE		0
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+
+#define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
+		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
+/*
+ * Mask of bits returned by pte_pgprot()
+ */
+#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
+			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
+			 _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_EXEC | \
+			 _PAGE_SOFT_DIRTY | H_PTE_PKEY)
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE	(_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_X
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY_X
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_X
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED_X
+#define __S111	PAGE_SHARED_X
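+
+/*
+ * Illustrative note (editor's addition): generic mm code indexes these
+ * tables with the low vm_flags bits, laid out as XWR per the note above,
+ * e.g. protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)].
+ */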
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_TOLERANT)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NON_IDEMPOTENT)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+
+#ifndef __ASSEMBLY__
+/*
+ * page table defines
+ */
+extern unsigned long __pte_index_size;
+extern unsigned long __pmd_index_size;
+extern unsigned long __pud_index_size;
+extern unsigned long __pgd_index_size;
+extern unsigned long __pud_cache_index;
+#define PTE_INDEX_SIZE  __pte_index_size
+#define PMD_INDEX_SIZE  __pmd_index_size
+#define PUD_INDEX_SIZE  __pud_index_size
+#define PGD_INDEX_SIZE  __pgd_index_size
+/* pmd table use page table fragments */
+#define PMD_CACHE_INDEX  0
+#define PUD_CACHE_INDEX __pud_cache_index
+/*
+ * Because of use of pte fragments and THP, size of page table
+ * are not always derived out of index size above.
+ */
+extern unsigned long __pte_table_size;
+extern unsigned long __pmd_table_size;
+extern unsigned long __pud_table_size;
+extern unsigned long __pgd_table_size;
+#define PTE_TABLE_SIZE	__pte_table_size
+#define PMD_TABLE_SIZE	__pmd_table_size
+#define PUD_TABLE_SIZE	__pud_table_size
+#define PGD_TABLE_SIZE	__pgd_table_size
+
+extern unsigned long __pmd_val_bits;
+extern unsigned long __pud_val_bits;
+extern unsigned long __pgd_val_bits;
+#define PMD_VAL_BITS	__pmd_val_bits
+#define PUD_VAL_BITS	__pud_val_bits
+#define PGD_VAL_BITS	__pgd_val_bits
+
+extern unsigned long __pte_frag_nr;
+#define PTE_FRAG_NR __pte_frag_nr
+extern unsigned long __pte_frag_size_shift;
+#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
+extern unsigned long __pmd_frag_nr;
+#define PMD_FRAG_NR __pmd_frag_nr
+extern unsigned long __pmd_frag_size_shift;
+#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift
+#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT)
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS		0xc0000000000000ffUL
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS		0xc0000000000000ffUL
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0xc0000000000000ffUL
+
+/*
+ * Used as an indicator for rcu callback functions
+ */
+enum pgtable_index {
+	PTE_INDEX = 0,
+	PMD_INDEX,
+	PUD_INDEX,
+	PGD_INDEX,
+	/*
+	 * Below are used with 4k page size and hugetlb
+	 */
+	HTLB_16M_INDEX,
+	HTLB_16G_INDEX,
+};
+
+extern unsigned long __vmalloc_start;
+extern unsigned long __vmalloc_end;
+#define VMALLOC_START	__vmalloc_start
+#define VMALLOC_END	__vmalloc_end
+
+extern unsigned long __kernel_virt_start;
+extern unsigned long __kernel_virt_size;
+extern unsigned long __kernel_io_start;
+#define KERN_VIRT_START __kernel_virt_start
+#define KERN_VIRT_SIZE  __kernel_virt_size
+#define KERN_IO_START  __kernel_io_start
+extern struct page *vmemmap;
+extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
+#endif /* __ASSEMBLY__ */
+
+#include <asm/book3s/64/hash.h>
+#include <asm/book3s/64/radix.h>
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/pgtable-64k.h>
+#else
+#include <asm/book3s/64/pgtable-4k.h>
+#endif
+
+#include <asm/barrier.h>
+/*
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define FULL_IO_SIZE	0x80000000ul
+#define  ISA_IO_BASE	(KERN_IO_START)
+#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
+#define  PHB_IO_BASE	(ISA_IO_END)
+#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
+
+/* Advertise special mapping type for AGP */
+#define HAVE_PAGE_AGP
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This is the default implementation of various PTE accessors. It's
+ * used in all cases except Book3S with 64K pages, where we have a
+ * concept of sub-pages.
+ */
+#ifndef __real_pte
+
+#define __real_pte(e, p, o)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+	do {							         \
+		index = 0;					         \
+		shift = mmu_psize_defs[psize].shift;		         \
+
+#define pte_iterate_hashed_end() } while(0)
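+
+/*
+ * Illustrative sketch (editor's addition): typical use of the iterator;
+ * with the default __real_pte() above the body runs exactly once.
+ */
+#if 0	/* example only */
+	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
+		/* operate on one hashed subpage of (vpn, shift, index) */
+	} pte_iterate_hashed_end();
+#endif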
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+
+#endif /* __real_pte */
+
+static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep, unsigned long clr,
+				       unsigned long set, int huge)
+{
+	if (radix_enabled())
+		return radix__pte_update(mm, addr, ptep, clr, set, huge);
+	return hash__pte_update(mm, addr, ptep, clr, set, huge);
+}
+/*
+ * For hash even if we have _PAGE_ACCESSED = 0, we do a pte_update.
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally.
+ * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
+ * function for both hash and radix.
+ */
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+
+	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+		return 0;
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+	return (old & _PAGE_ACCESSED) != 0;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep)	\
+({								\
+	int __r;						\
+	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+	__r;							\
+})
+
+static inline int __pte_write(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+	/*
+	 * Saved write ptes are prot none ptes that don't have the
+	 * privileged bit set. We mark prot none as one which has
+	 * present and privileged bits set and RWX cleared. To mark
+	 * protnone which used to have _PAGE_WRITE set we clear
+	 * the privileged bit.
+	 */
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+	return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+	return __pte_write(pte) || pte_savedwrite(pte);
+}
+
+static inline int pte_read(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
+{
+	if (__pte_write(*ptep))
+		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+	else if (unlikely(pte_savedwrite(*ptep)))
+		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * We should not find protnone for hugetlb, but this completes the
+	 * interface.
+	 */
+	if (__pte_write(*ptep))
+		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+	else if (unlikely(pte_savedwrite(*ptep)))
+		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
+{
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+	return __pte(old);
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr,
+					    pte_t *ptep, int full)
+{
+	if (full && radix_enabled()) {
+		/*
+		 * We know that this is a full mm pte clear and
+		 * hence can be sure there is no parallel set_pte.
+		 */
+		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+	}
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t * ptep)
+{
+	pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
+}
+
+static inline int pte_special(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
+}
+
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pte_protnone(pte_t pte)
+{
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
+		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+#define pte_mk_savedwrite pte_mk_savedwrite
+static inline pte_t pte_mk_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by Autonuma subsystem to preserve the write bit
+	 * while marking the pte PROT_NONE. Only allow this
+	 * on a PROT_NONE pte.
+	 */
+	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
+		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
+	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+}
+
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by the KSM subsystem to make a protnone pte read-only.
+	 */
+	VM_BUG_ON(!pte_protnone(pte));
+	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+}
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+	VM_WARN_ON(1);
+	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+	/*
+	 * A pte is considered present if _PAGE_PRESENT is set.
+	 * We also need to consider a pte present which is marked
+	 * invalid during ptep_set_access_flags. Hence we look for
+	 * _PAGE_INVALID if we find _PAGE_PRESENT cleared.
+	 */
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
+}
+
+#ifdef CONFIG_PPC_MEM_KEYS
+extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
+#else
+static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
+{
+	return true;
+}
+#endif /* CONFIG_PPC_MEM_KEYS */
+
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+	unsigned long pteval = pte_val(pte);
+	/* Also check for pte_user */
+	unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
+	/*
+	 * _PAGE_READ is needed for any access and will be
+	 * cleared for PROT_NONE
+	 */
+	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;
+
+	if (write)
+		need_pte_bits |= _PAGE_WRITE;
+
+	if ((pteval & need_pte_bits) != need_pte_bits)
+		return false;
+
+	if ((pteval & clear_pte_bits) == clear_pte_bits)
+		return false;
+
+	return arch_pte_access_permitted(pte_val(pte), write, 0);
+}
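+
+/*
+ * For example (illustrative): a kernel-only pte with _PAGE_PRESENT,
+ * _PAGE_READ and _PAGE_PRIVILEGED set is rejected by the privileged
+ * check above, while a user pte with only _PAGE_PRESENT | _PAGE_READ
+ * passes for write == false and fails for write == true because
+ * _PAGE_WRITE is missing.
+ */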
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
+		     pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	if (unlikely(pte_savedwrite(pte)))
+		return pte_clear_savedwrite(pte);
+	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	/*
+	 * write implies read, hence set both
+	 */
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
+/*
+ * This is potentially called with a pmd as the argument, in which case it's not
+ * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
+ * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
+ * use in page directory entries (ie. non-ptes).
+ */
+static inline int pte_devmap(pte_t pte)
+{
+	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+
+	return (pte_raw(pte) & mask) == mask;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	/* FIXME!! check whether this needs to be a conditional */
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+static inline bool pte_user(pte_t pte)
+{
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+	/*							\
+	 * Don't have overlapping bits with _PAGE_HPTEFLAGS;	\
+	 * we filter HPTEFLAGS on set_pte.			\
+	 */							\
+	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
+	} while (0)
+/*
+ * On ptes we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
+				& ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
+#define __swp_entry(type, offset)	((swp_entry_t) { \
+				((type) << _PAGE_BIT_SWAP_TYPE) \
+				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
+/*
+ * swp_entry_t must be independent of pte bits. We build a swp_entry_t
+ * from the swap type and offset we get from swap, and convert that to a
+ * pte to find a matching pte in the Linux page table.
+ * Clear bits not found in swap entries here.
+ */
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
+#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
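+
+/*
+ * Worked example (illustrative; assumes the offset fits in the RPN
+ * field): __swp_entry(2, 0x1234) places type 2 at _PAGE_BIT_SWAP_TYPE
+ * and offset 0x1234 in the RPN field, so that __swp_type() and
+ * __swp_offset() recover 2 and 0x1234, and __swp_entry_to_pte() merely
+ * ORs in _PAGE_PTE.
+ */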
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#else
+#define _PAGE_SWP_SOFT_DIRTY	0UL
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline bool pte_swp_soft_dirty(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+static inline bool check_pte_access(unsigned long access, unsigned long ptev)
+{
+	/*
+	 * This checks for the _PAGE_RWX and _PAGE_PRESENT bits.
+	 */
+	if (access & ~ptev)
+		return false;
+	/*
+	 * This checks for access to privileged space.
+	 */
+	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
+		return false;
+
+	return true;
+}
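+
+/*
+ * Usage sketch (illustrative): for a user read fault the caller passes
+ * access = _PAGE_READ; the first test fails whenever a requested bit is
+ * missing from the pte, and the second rejects any mismatch between the
+ * requested and actual _PAGE_PRIVILEGED state.
+ */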
+/*
+ * Generic functions with hash/radix callbacks
+ */
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   int psize)
+{
+	if (radix_enabled())
+		return radix__ptep_set_access_flags(vma, ptep, entry,
+						    address, psize);
+	return hash__ptep_set_access_flags(ptep, entry);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	if (radix_enabled())
+		return radix__pte_same(pte_a, pte_b);
+	return hash__pte_same(pte_a, pte_b);
+}
+
+static inline int pte_none(pte_t pte)
+{
+	if (radix_enabled())
+		return radix__pte_none(pte);
+	return hash__pte_none(pte);
+}
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+	if (radix_enabled())
+		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
+	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
+}
+
+#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_NON_IDEMPOTENT);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+			_PAGE_TOLERANT);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	return pgprot_noncached_wc(prot);
+}
+/*
+ * Check whether a pte mapping has the cache-inhibited property.
+ */
+static inline bool pte_ci(pte_t pte)
+{
+	unsigned long pte_v = pte_val(pte);
+
+	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
+	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+		return true;
+	return false;
+}
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+	*pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	*pmdp = __pmd(0);
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+	return !pmd_raw(pmd);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return !pmd_none(pmd);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+	if (radix_enabled())
+		return radix__pmd_bad(pmd);
+	return hash__pmd_bad(pmd);
+}
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+	*pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+	*pudp = __pud(0);
+}
+
+static inline int pud_none(pud_t pud)
+{
+	return !pud_raw(pud);
+}
+
+static inline int pud_present(pud_t pud)
+{
+	return !pud_none(pud);
+}
+
+extern struct page *pud_page(pud_t pud);
+extern struct page *pmd_page(pmd_t pmd);
+static inline pte_t pud_pte(pud_t pud)
+{
+	return __pte_raw(pud_raw(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+	return __pud_raw(pte_raw(pte));
+}
+#define pud_write(pud)		pte_write(pud_pte(pud))
+
+static inline int pud_bad(pud_t pud)
+{
+	if (radix_enabled())
+		return radix__pud_bad(pud);
+	return hash__pud_bad(pud);
+}
+
+#define pud_access_permitted pud_access_permitted
+static inline bool pud_access_permitted(pud_t pud, bool write)
+{
+	return pte_access_permitted(pud_pte(pud), write);
+}
+
+#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+	*pgdp = __pgd(val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	*pgdp = __pgd(0);
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	return !pgd_raw(pgd);
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+	return !pgd_none(pgd);
+}
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+	return __pte_raw(pgd_raw(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+	return __pgd_raw(pte_raw(pte));
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	if (radix_enabled())
+		return radix__pgd_bad(pgd);
+	return hash__pgd_bad(pgd);
+}
+
+#define pgd_access_permitted pgd_access_permitted
+static inline bool pgd_access_permitted(pgd_t pgd, bool write)
+{
+	return pte_access_permitted(pgd_pte(pgd), write);
+}
+
+extern struct page *pgd_page(pgd_t pgd);
+
+/* Pointers in the page table tree are physical addresses */
+#define __pgtable_ptr_val(ptr)	__pa(ptr)
+
+#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
+#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
+#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)
+
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
+#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
+#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
+
+/*
+ * Find an entry in a page-table-directory.  We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+
+#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
+
+#define pud_offset(pgdp, addr)	\
+	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
+#define pmd_offset(pudp,addr) \
+	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
+#define pte_offset_kernel(dir,addr) \
+	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)			do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pte_ERROR(e) \
+	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+#define pgd_ERROR(e) \
+	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+static inline int map_kernel_page(unsigned long ea, unsigned long pa,
+				  unsigned long flags)
+{
+	if (radix_enabled()) {
+#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
+		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
+		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
+#endif
+		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+	}
+	return hash__map_kernel_page(ea, pa, flags);
+}
+
+static inline int __meminit vmemmap_create_mapping(unsigned long start,
+						   unsigned long page_size,
+						   unsigned long phys)
+{
+	if (radix_enabled())
+		return radix__vmemmap_create_mapping(start, page_size, phys);
+	return hash__vmemmap_create_mapping(start, page_size, phys);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline void vmemmap_remove_mapping(unsigned long start,
+					  unsigned long page_size)
+{
+	if (radix_enabled())
+		return radix__vmemmap_remove_mapping(start, page_size);
+	return hash__vmemmap_remove_mapping(start, page_size);
+}
+#endif
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte_raw(pmd_raw(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+	return __pmd_raw(pte_raw(pte));
+}
+
+static inline pte_t *pmdp_ptep(pmd_t *pmd)
+{
+	return (pte_t *)pmd;
+}
+#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
+#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
+#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))
+
+#define pmd_access_permitted pmd_access_permitted
+static inline bool pmd_access_permitted(pmd_t pmd, bool write)
+{
+	return pte_access_permitted(pmd_pte(pmd), write);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		       pmd_t *pmdp, pmd_t pmd);
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+				 pmd_t *pmd);
+extern int hash__has_transparent_hugepage(void);
+static inline int has_transparent_hugepage(void)
+{
+	if (radix_enabled())
+		return radix__has_transparent_hugepage();
+	return hash__has_transparent_hugepage();
+}
+#define has_transparent_hugepage has_transparent_hugepage
+
+static inline unsigned long
+pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
+		    unsigned long clr, unsigned long set)
+{
+	if (radix_enabled())
+		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
+	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
+}
+/*
+ * For radix we should always find H_PAGE_HASHPTE zero, hence the
+ * below works for radix too.
+ */
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pmd_t *pmdp)
+{
+	unsigned long old;
+
+	if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+		return 0;
+	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+	return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pmd_t *pmdp)
+{
+	if (__pmd_write((*pmdp)))
+		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+	else if (unlikely(pmd_savedwrite(*pmdp)))
+		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	if (radix_enabled())
+		return radix__pmd_trans_huge(pmd);
+	return hash__pmd_trans_huge(pmd);
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	if (radix_enabled())
+		return radix__pmd_same(pmd_a, pmd_b);
+	return hash__pmd_same(pmd_a, pmd_b);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	if (radix_enabled())
+		return radix__pmd_mkhuge(pmd);
+	return hash__pmd_mkhuge(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pmd_t *pmdp)
+{
+	if (radix_enabled())
+		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
+	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+}
+
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+					unsigned long address, pmd_t *pmdp)
+{
+	if (radix_enabled())
+		return radix__pmdp_collapse_flush(vma, address, pmdp);
+	return hash__pmdp_collapse_flush(vma, address, pmdp);
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
+					      pmd_t *pmdp, pgtable_t pgtable)
+{
+	if (radix_enabled())
+		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+}
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
+						    pmd_t *pmdp)
+{
+	if (radix_enabled())
+		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
+	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+			     pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+					 struct spinlock *old_pmd_ptl,
+					 struct vm_area_struct *vma)
+{
+	if (radix_enabled())
+		return false;
+	/*
+	 * Archs like ppc64 use the pgtable to store per-pmd
+	 * specific information. So when we switch the pmd,
+	 * we should also withdraw and deposit the pgtable.
+	 */
+	return true;
+}
+
+#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
+static inline bool arch_needs_pgtable_deposit(void)
+{
+	if (radix_enabled())
+		return false;
+	return true;
+}
+extern void serialize_against_pte_lookup(struct mm_struct *mm);
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
+static inline int pmd_devmap(pmd_t pmd)
+{
+	return pte_devmap(pmd_pte(pmd));
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+	return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline int pud_pfn(pud_t pud)
+{
+	/*
+	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
+	 * check so this should never be used. If it grows another user we
+	 * want to know about it.
+	 */
+	BUILD_BUG();
+	return 0;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h
new file mode 100644
index 0000000..863c3e8
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_4K_H
+#define _ASM_POWERPC_PGTABLE_RADIX_4K_H
+
+/*
+ * For 4K page size the supported index sizes are 13/9/9/9.
+ */
+#define RADIX_PTE_INDEX_SIZE  9  /* 2MB huge page */
+#define RADIX_PMD_INDEX_SIZE  9  /* 1G huge page */
+#define RADIX_PUD_INDEX_SIZE	 9
+#define RADIX_PGD_INDEX_SIZE  13
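+
+/*
+ * These index sizes plus the 12-bit 4K page offset cover the full
+ * 52-bit address space: 13 + 9 + 9 + 9 + 12 = 52.
+ */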
+/*
+ * One fragment per page.
+ */
+#define RADIX_PTE_FRAG_SIZE_SHIFT  (RADIX_PTE_INDEX_SIZE + 3)
+#define RADIX_PTE_FRAG_NR	(PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT  (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR	(PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
+#endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h
new file mode 100644
index 0000000..ccb78ca
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_64K_H
+#define _ASM_POWERPC_PGTABLE_RADIX_64K_H
+
+/*
+ * For 64K page size the supported index sizes are 13/9/9/5.
+ */
+#define RADIX_PTE_INDEX_SIZE  5  /* 2MB huge page */
+#define RADIX_PMD_INDEX_SIZE  9  /* 1G huge page */
+#define RADIX_PUD_INDEX_SIZE	 9
+#define RADIX_PGD_INDEX_SIZE  13
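+
+/*
+ * These index sizes plus the 16-bit 64K page offset cover the full
+ * 52-bit address space: 13 + 9 + 9 + 5 + 16 = 52.
+ */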
+
+/*
+ * We use a 256-byte PTE page fragment in radix:
+ * 8 bytes per PTE entry.
+ */
+#define RADIX_PTE_FRAG_SIZE_SHIFT  (RADIX_PTE_INDEX_SIZE + 3)
+#define RADIX_PTE_FRAG_NR	(PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT  (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR	(PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
+#endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
new file mode 100644
index 0000000..7d1a3d1
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
+#define _ASM_POWERPC_PGTABLE_RADIX_H
+
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/cmpxchg.h>
+#endif
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/radix-64k.h>
+#else
+#include <asm/book3s/64/radix-4k.h>
+#endif
+
+#ifndef __ASSEMBLY__
+#include <asm/book3s/64/tlbflush-radix.h>
+#include <asm/cpu_has_feature.h>
+#endif
+
+/* An empty PTE can still have an R or C writeback */
+#define RADIX_PTE_NONE_MASK		(_PAGE_DIRTY | _PAGE_ACCESSED)
+
+/* Bits to set in a RPMD/RPUD/RPGD */
+#define RADIX_PMD_VAL_BITS		(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_VAL_BITS		(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_VAL_BITS		(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
+
+/* Don't have anything in the reserved bits and leaf bits */
+#define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
+#define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
+#define RADIX_PGD_BAD_BITS		0x60000000000000e0UL
+
+#define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT		(RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE +	\
+			      RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
+#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
+
+/*
+ * We support a 52-bit address space. Use the top bit for the kernel
+ * virtual mapping. Also make sure the kernel fits in the top
+ * quadrant.
+ *
+ *           +------------------+
+ *           +------------------+  Kernel virtual map (0xc008000000000000)
+ *           |                  |
+ *           |                  |
+ *           |                  |
+ * 0b11......+------------------+  Kernel linear map (0xc....)
+ *           |                  |
+ *           |     2 quadrant   |
+ *           |                  |
+ * 0b10......+------------------+
+ *           |                  |
+ *           |    1 quadrant    |
+ *           |                  |
+ * 0b01......+------------------+
+ *           |                  |
+ *           |    0 quadrant    |
+ *           |                  |
+ * 0b00......+------------------+
+ *
+ *
+ * 3rd quadrant expanded:
+ * +------------------------------+
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel IO map end (0xc010000000000000)
+ * |                              |
+ * |                              |
+ * |      1/2 of virtual map      |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel IO map start
+ * |                              |
+ * |      1/4 of virtual map      |
+ * |                              |
+ * +------------------------------+  Kernel vmemmap start
+ * |                              |
+ * |     1/4 of virtual map       |
+ * |                              |
+ * +------------------------------+  Kernel virt start (0xc008000000000000)
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel linear (0xc.....)
+ */
+
+#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
+#define RADIX_KERN_VIRT_SIZE  ASM_CONST(0x0008000000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region and
+ * occupies a quarter of it on the radix config
+ * (we keep a quarter for the virtual memmap).
+ */
+#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
+#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
+#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs.
+ */
+#define RADIX_VMEMMAP_BASE		(RADIX_VMALLOC_END)
+
+#define RADIX_KERN_IO_START	(RADIX_KERN_VIRT_START + (RADIX_KERN_VIRT_SIZE >> 1))
+
+#ifndef __ASSEMBLY__
+#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
+#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
+#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
+#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
+#endif
+
+extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+					 pte_t entry, unsigned long address,
+					 int psize);
+
+static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
+					       unsigned long set)
+{
+	__be64 old_be, tmp_be;
+
+	__asm__ __volatile__(
+	"1:	ldarx	%0,0,%3		# pte_update\n"
+	"	andc	%1,%0,%5	\n"
+	"	or	%1,%1,%4	\n"
+	"	stdcx.	%1,0,%3		\n"
+	"	bne-	1b"
+	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
+	: "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
+	: "cc" );
+
+	return be64_to_cpu(old_be);
+}
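+
+/*
+ * A rough C equivalent of the loop above (sketch only; the real code
+ * must use the ldarx/stdcx. reservation pair, and store_conditional()
+ * here is a hypothetical stand-in for stdcx.):
+ *
+ *	do {
+ *		old = *ptep;				// ldarx
+ *		new = (old & ~clr) | set;		// andc + or
+ *	} while (!store_conditional(ptep, new));	// retry if lost
+ *
+ * The pte is big-endian in memory, hence the cpu_to_be64() conversions
+ * of clr and set and the be64_to_cpu() of the return value.
+ */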
+
+static inline unsigned long radix__pte_update(struct mm_struct *mm,
+					unsigned long addr,
+					pte_t *ptep, unsigned long clr,
+					unsigned long set,
+					int huge)
+{
+	unsigned long old_pte;
+
+	old_pte = __radix_pte_update(ptep, clr, set);
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
+	return old_pte;
+}
+
+static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
+						   unsigned long addr,
+						   pte_t *ptep, int full)
+{
+	unsigned long old_pte;
+
+	if (full) {
+		old_pte = pte_val(*ptep);
+		*ptep = __pte(0);
+	} else
+		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
+
+	return __pte(old_pte);
+}
+
+static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
+}
+
+static inline int radix__pte_none(pte_t pte)
+{
+	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
+}
+
+static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep, pte_t pte, int percpu)
+{
+	*ptep = pte;
+
+	/*
+	 * The architecture suggests a ptesync after setting the pte, which
+	 * orders the store that updates the pte with subsequent page table
+	 * walk accesses which may load the pte. Without this it may be
+	 * possible for a subsequent access to result in a spurious fault.
+	 *
+	 * This is not necessary for correctness, because a spurious fault
+	 * is tolerated by the page fault handler, and this store will
+	 * eventually be seen. In testing, there was no noticeable increase
+	 * in user faults on POWER9. Avoiding ptesync here is a significant
+	 * win for things like fork. If a future microarchitecture benefits
+	 * from ptesync, it should probably go into update_mmu_cache, rather
+	 * than set_pte_at (which is used to set ptes unrelated to faults).
+	 *
+	 * Spurious faults to vmalloc region are not tolerated, so there is
+	 * a ptesync in flush_cache_vmap.
+	 */
+}
+
+static inline int radix__pmd_bad(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
+}
+
+static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
+}
+
+static inline int radix__pud_bad(pud_t pud)
+{
+	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
+}
+
+static inline int radix__pgd_bad(pgd_t pgd)
+{
+	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+static inline int radix__pmd_trans_huge(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+}
+
+static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
+{
+	return __pmd(pmd_val(pmd) | _PAGE_PTE);
+}
+
+extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+					  pmd_t *pmdp, unsigned long clr,
+					  unsigned long set);
+extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+					pgtable_t pgtable);
+extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp);
+extern int radix__has_transparent_hugepage(void);
+#endif
+
+extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
+					     unsigned long page_size,
+					     unsigned long phys);
+extern void radix__vmemmap_remove_mapping(unsigned long start,
+				    unsigned long page_size);
+
+extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+				 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+	unsigned long rts_field;
+	/*
+	 * We support 52 bits, hence:
+	 * bits 52 - 31 = 21, 0b10101
+	 * RTS encoding details:
+	 * bits 0 - 3 of rts -> bits 6 - 8 of the unsigned long
+	 * bits 4 - 5 of rts -> bits 62 - 63 of the unsigned long
+	 */
+	rts_field = (0x5UL << 5); /* 6 - 8 bits */
+	rts_field |= (0x2UL << 61);
+
+	return rts_field;
+}
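+
+/*
+ * Worked out (illustrative): RTS = 52 - 31 = 21 = 0b10101. The low
+ * three bits (0b101 = 0x5) form the first field and the high two bits
+ * (0b10 = 0x2) the second, matching the two shifts above.
+ */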
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int radix__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int radix__remove_section_mapping(unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
new file mode 100644
index 0000000..db0deda
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/slice.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
+#define _ASM_POWERPC_BOOK3S_64_SLICE_H
+
+#ifdef CONFIG_PPC_MM_SLICES
+
+#define SLICE_LOW_SHIFT		28
+#define SLICE_LOW_TOP		(0x100000000ul)
+#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
+
+#define SLICE_HIGH_SHIFT	40
+#define SLICE_NUM_HIGH		(H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
+#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
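+
+/*
+ * Example (illustrative): with SLICE_LOW_SHIFT = 28 each low slice
+ * covers 256MB and SLICE_NUM_LOW = 0x100000000 >> 28 = 16, so
+ * GET_LOW_SLICE_INDEX(0x30000000) = 3.
+ */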
+
+#else /* CONFIG_PPC_MM_SLICES */
+
+#define get_slice_psize(mm, addr)	((mm)->context.user_psize)
+#define slice_set_user_psize(mm, psize)		\
+do {						\
+	(mm)->context.user_psize = (psize);	\
+	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
+} while (0)
+
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
new file mode 100644
index 0000000..64d02a7
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
+
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+	int			active;
+	unsigned long		index;
+	struct mm_struct	*mm;
+	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+	unsigned long		vpn[PPC64_TLB_BATCH_NR];
+	unsigned int		psize;
+	int			ssize;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch;
+
+	if (radix_enabled())
+		return;
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
+	batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch;
+
+	if (radix_enabled())
+		return;
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
+	batch->active = 0;
+}
+
+#define arch_flush_lazy_mmu_mode()      do {} while (0)
+
+extern void hash__tlbiel_all(unsigned int action);
+
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
+			    int ssize, unsigned long flags);
+extern void flush_hash_range(unsigned long number, int local);
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+				pmd_t *pmdp, unsigned int psize, int ssize,
+				unsigned long flags);
+static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void hash__flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void hash__local_flush_all_mm(struct mm_struct *mm)
+{
+	/*
+	 * There's no Page Walk Cache for hash, so what is needed is
+	 * the same as flush_tlb_mm(), which doesn't really make sense
+	 * with hash. So the only thing we could do is flush the
+	 * entire LPID! Punt for now, as it's not being used.
+	 */
+	WARN_ON_ONCE(1);
+}
+
+static inline void hash__flush_all_mm(struct mm_struct *mm)
+{
+	/*
+	 * There's no Page Walk Cache for hash, so what is needed is
+	 * the same as flush_tlb_mm(), which doesn't really make sense
+	 * with hash. So the only thing we could do is flush the
+	 * entire LPID! Punt for now, as it's not being used.
+	 */
+	WARN_ON_ONCE(1);
+}
+
+static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
+					  unsigned long vmaddr)
+{
+}
+
+static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr)
+{
+}
+
+static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+}
+
+static inline void hash__flush_tlb_kernel_range(unsigned long start,
+					    unsigned long end)
+{
+}
+
+struct mmu_gather;
+extern void hash__tlb_flush(struct mmu_gather *tlb);
+/* Private function for use by PCI IO mapping code */
+extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+				     unsigned long end);
+extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long addr);
+#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
new file mode 100644
index 0000000..1154a6d
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TLBFLUSH_RADIX_H
+#define _ASM_POWERPC_TLBFLUSH_RADIX_H
+
+struct vm_area_struct;
+struct mm_struct;
+struct mmu_gather;
+
+static inline int mmu_get_ap(int psize)
+{
+	return mmu_psize_defs[psize].ap;
+}
+
+#ifdef CONFIG_PPC_RADIX_MMU
+extern void radix__tlbiel_all(unsigned int action);
+#else
+static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
+#endif
+
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+					 unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
+extern void radix__local_flush_all_mm(struct mm_struct *mm);
+extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					      int psize);
+extern void radix__tlb_flush(struct mmu_gather *tlb);
+#ifdef CONFIG_SMP
+extern void radix__flush_tlb_mm(struct mm_struct *mm);
+extern void radix__flush_all_mm(struct mm_struct *mm);
+extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					int psize);
+#else
+#define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
+#define radix__flush_all_mm(mm)		radix__local_flush_all_mm(mm)
+#define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
+#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
+#endif
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
+extern void radix__flush_tlb_all(void);
+
+extern void radix__flush_tlb_lpid_page(unsigned int lpid,
+					unsigned long addr,
+					unsigned long page_size);
+extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
new file mode 100644
index 0000000..ebf572e
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT	~0UL
+
+#include <linux/mm_types.h>
+#include <asm/book3s/64/tlbflush-hash.h>
+#include <asm/book3s/64/tlbflush-radix.h>
+
+/* TLB flush actions. Used as argument to tlbiel_all() */
+enum {
+	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
+	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
+};
+
+#ifdef CONFIG_PPC_NATIVE
+static inline void tlbiel_all(void)
+{
+	/*
+	 * This is used for host machine check and bootup.
+	 *
+	 * This uses early_radix_enabled and implementations use
+	 * early_cpu_has_feature etc because that works early in boot
+	 * and this is the machine check path which is not performance
+	 * critical.
+	 */
+	if (early_radix_enabled())
+		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+	else
+		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+}
+#else
+static inline void tlbiel_all(void) { BUG(); };
+#endif
+
+static inline void tlbiel_all_lpid(bool radix)
+{
+	/*
+	 * This is used for guest machine check.
+	 */
+	if (radix)
+		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+	else
+		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+}
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_pmd_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_tlb_kernel_range(start, end);
+	return hash__flush_tlb_kernel_range(start, end);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	if (radix_enabled())
+		return radix__local_flush_tlb_mm(mm);
+	return hash__local_flush_tlb_mm(mm);
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
+{
+	if (radix_enabled())
+		return radix__local_flush_tlb_page(vma, vmaddr);
+	return hash__local_flush_tlb_page(vma, vmaddr);
+}
+
+static inline void local_flush_all_mm(struct mm_struct *mm)
+{
+	if (radix_enabled())
+		return radix__local_flush_all_mm(mm);
+	return hash__local_flush_all_mm(mm);
+}
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (radix_enabled())
+		return radix__tlb_flush(tlb);
+	return hash__tlb_flush(tlb);
+}
+
+#ifdef CONFIG_SMP
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (radix_enabled())
+		return radix__flush_tlb_mm(mm);
+	return hash__flush_tlb_mm(mm);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	if (radix_enabled())
+		return radix__flush_tlb_page(vma, vmaddr);
+	return hash__flush_tlb_page(vma, vmaddr);
+}
+
+static inline void flush_all_mm(struct mm_struct *mm)
+{
+	if (radix_enabled())
+		return radix__flush_all_mm(mm);
+	return hash__flush_all_mm(mm);
+}
+#else
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
+#define flush_all_mm(mm)		local_flush_all_mm(mm)
+#endif /* CONFIG_SMP */
+
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	/* See ptep_set_access_flags comment */
+	if (atomic_read(&vma->vm_mm->context.copros) > 0)
+		flush_tlb_page(vma, address);
+}
+
+/*
+ * Flush the page walk cache for the address.
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	/*
+	 * Flush the page table walk cache on freeing a page table. We already
+	 * have marked the upper/higher level page table entry none by now.
+	 * So it is safe to flush PWC here.
+	 */
+	if (!radix_enabled())
+		return;
+
+	radix__flush_tlb_pwc(tlb, address);
+}
+#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
new file mode 100644
index 0000000..6b178ca
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_PGALLOC_H
+#define _ASM_POWERPC_BOOK3S_PGALLOC_H
+
+#include <linux/mm.h>
+
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgalloc.h>
+#else
+#include <asm/book3s/32/pgalloc.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
new file mode 100644
index 0000000..6436b65
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_PGTABLE_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgtable.h>
+#else
+#include <asm/book3s/32/pgtable.h>
+#endif
+
+#define FIRST_USER_ADDRESS	0UL
+#ifndef __ASSEMBLY__
+/* Insert a PTE: the top-level function is out of line. It uses an
+ * inline low-level function in the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		       pte_t pte);
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 0000000..dec11de
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h
new file mode 100644
index 0000000..1c121f3
--- /dev/null
+++ b/arch/powerpc/include/asm/bootx.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the structure passed from the BootX application
+ * (for MacOS) when it is used to boot Linux.
+ *
+ * Written by Benjamin Herrenschmidt.
+ */
+
+#ifndef __ASM_BOOTX_H__
+#define __ASM_BOOTX_H__
+
+#include <uapi/asm/bootx.h>
+
+/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index
+ * is represented by 3 short words containing a 16-bit (unsigned) color
+ * component. Later versions may contain the gamma table for direct-color
+ * devices here.
+ */
+#define BOOTX_COLORTABLE_SIZE    (256UL*3UL*2UL)
+
+/* BootX passes the device-tree using a format that comes from earlier
+ * ppc32 kernels. This used to match what is in prom.h, but not anymore,
+ * so we now define it here.
+ */
+struct bootx_dt_prop {
+	u32	name;
+	int	length;
+	u32	value;
+	u32	next;
+};
+
+struct bootx_dt_node {
+	u32	unused0;
+	u32	unused1;
+	u32	phandle;	/* not really available */
+	u32	unused2;
+	u32	unused3;
+	u32	unused4;
+	u32	unused5;
+	u32	full_name;
+	u32	properties;
+	u32	parent;
+	u32	child;
+	u32	sibling;
+	u32	next;
+	u32	allnext;
+};
+
+extern void bootx_init(unsigned long r4, unsigned long phys);
+
+#endif
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
new file mode 100644
index 0000000..3ffad03
--- /dev/null
+++ b/arch/powerpc/include/asm/btext.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for using the procedures in btext.c.
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ */
+#ifndef __PPC_BTEXT_H
+#define __PPC_BTEXT_H
+#ifdef __KERNEL__
+
+extern int btext_find_display(int allow_nonstdout);
+extern void btext_update_display(unsigned long phys, int width, int height,
+				 int depth, int pitch);
+extern void btext_setup_display(int width, int height, int depth, int pitch,
+				unsigned long address);
+extern void btext_prepare_BAT(void);
+extern void btext_map(void);
+extern void btext_unmap(void);
+
+extern void btext_drawchar(char c);
+extern void btext_drawstring(const char *str);
+extern void btext_drawhex(unsigned long v);
+extern void btext_drawtext(const char *c, unsigned int len);
+
+extern void btext_clearscreen(void);
+extern void btext_flushscreen(void);
+extern void btext_flushline(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PPC_BTEXT_H */
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
new file mode 100644
index 0000000..fd06dbe
--- /dev/null
+++ b/arch/powerpc/include/asm/bug.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
+#ifdef __KERNEL__
+
+#include <asm/asm-compat.h>
+
+/*
+ * Define an illegal instr to trap on the bug.
+ * We don't use 0 because that marks the end of a function
+ * in the ELF ABI.  That's "Boo Boo" in case you wonder...
+ */
+#define BUG_OPCODE .long 0x00b00b00  /* For asm */
+#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
+
+#ifdef CONFIG_BUG
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+	 .section __bug_table,"aw"
+5001:	 PPC_LONG \addr, 5002f
+	 .short \line, \flags
+	 .org 5001b+BUG_ENTRY_SIZE
+	 .previous
+	 .section .rodata,"a"
+5002:	 .asciz "\file"
+	 .previous
+.endm
+#else
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+	 .section __bug_table,"aw"
+5001:	 PPC_LONG \addr
+	 .short \flags
+	 .org 5001b+BUG_ENTRY_SIZE
+	 .previous
+.endm
+#endif /* verbose */
+
+#else /* !__ASSEMBLY__ */
+/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
+   sizeof(struct bug_entry), respectively */
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _EMIT_BUG_ENTRY				\
+	".section __bug_table,\"aw\"\n"		\
+	"2:\t" PPC_LONG "1b, %0\n"		\
+	"\t.short %1, %2\n"			\
+	".org 2b+%3\n"				\
+	".previous\n"
+#else
+#define _EMIT_BUG_ENTRY				\
+	".section __bug_table,\"aw\"\n"		\
+	"2:\t" PPC_LONG "1b\n"			\
+	"\t.short %2\n"				\
+	".org 2b+%3\n"				\
+	".previous\n"
+#endif
+
+/*
+ * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
+ * optimisations. However, depending on the complexity of the condition,
+ * some compiler versions may not produce optimal results.
+ */
+
+#define BUG() do {						\
+	__asm__ __volatile__(					\
+		"1:	twi 31,0,0\n"				\
+		_EMIT_BUG_ENTRY					\
+		: : "i" (__FILE__), "i" (__LINE__),		\
+		    "i" (0), "i"  (sizeof(struct bug_entry)));	\
+	unreachable();						\
+} while (0)
+
+#define BUG_ON(x) do {						\
+	if (__builtin_constant_p(x)) {				\
+		if (x)						\
+			BUG();					\
+	} else {						\
+		__asm__ __volatile__(				\
+		"1:	"PPC_TLNEI"	%4,0\n"			\
+		_EMIT_BUG_ENTRY					\
+		: : "i" (__FILE__), "i" (__LINE__), "i" (0),	\
+		  "i" (sizeof(struct bug_entry)),		\
+		  "r" ((__force long)(x)));			\
+	}							\
+} while (0)
+
+#define __WARN_FLAGS(flags) do {				\
+	__asm__ __volatile__(					\
+		"1:	twi 31,0,0\n"				\
+		_EMIT_BUG_ENTRY					\
+		: : "i" (__FILE__), "i" (__LINE__),		\
+		  "i" (BUGFLAG_WARNING|(flags)),		\
+		  "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#define WARN_ON(x) ({						\
+	int __ret_warn_on = !!(x);				\
+	if (__builtin_constant_p(__ret_warn_on)) {		\
+		if (__ret_warn_on)				\
+			__WARN();				\
+	} else {						\
+		__asm__ __volatile__(				\
+		"1:	"PPC_TLNEI"	%4,0\n"			\
+		_EMIT_BUG_ENTRY					\
+		: : "i" (__FILE__), "i" (__LINE__),		\
+		  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
+		  "i" (sizeof(struct bug_entry)),		\
+		  "r" (__ret_warn_on));				\
+	}							\
+	unlikely(__ret_warn_on);				\
+})
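+
+/*
+ * As with the generic version, WARN_ON() evaluates to whether the
+ * condition fired, so callers can write, for example (ptep being an
+ * arbitrary pointer):
+ *
+ *	if (WARN_ON(!ptep))
+ *		return -EINVAL;
+ */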
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_BUG_ON
+#define HAVE_ARCH_WARN_ON
+#endif /* __ASSEMBLY__ */
+#else
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#endif
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void _exception_pkey(int, struct pt_regs *, int, unsigned long, int);
+extern void die(const char *, struct pt_regs *, long);
+extern bool die_will_crash(void);
+extern void panic_flush_kmsg_start(void);
+extern void panic_flush_kmsg_end(void);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BUG_H */
diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h
new file mode 100644
index 0000000..42fdb73
--- /dev/null
+++ b/arch/powerpc/include/asm/bugs.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_BUGS_H
+#define _ASM_POWERPC_BUGS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif	/* _ASM_POWERPC_BUGS_H */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
new file mode 100644
index 0000000..6629846
--- /dev/null
+++ b/arch/powerpc/include/asm/cache.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
+#define L1_CACHE_SHIFT		4
+#define MAX_COPY_PREFETCH	1
+#define IFETCH_ALIGN_SHIFT	2
+#elif defined(CONFIG_PPC_E500MC)
+#define L1_CACHE_SHIFT		6
+#define MAX_COPY_PREFETCH	4
+#define IFETCH_ALIGN_SHIFT	3
+#elif defined(CONFIG_PPC32)
+#define MAX_COPY_PREFETCH	4
+#define IFETCH_ALIGN_SHIFT	3	/* 603 fetches 2 insn at a time */
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT		7
+#else
+#define L1_CACHE_SHIFT		5
+#endif
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT		7
+#define IFETCH_ALIGN_SHIFT	4 /* POWER8,9 */
+#endif
+
+#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define	SMP_CACHE_BYTES		L1_CACHE_BYTES
+
+#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
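+
+/*
+ * For example, on CONFIG_PPC64 above L1_CACHE_SHIFT = 7 gives
+ * L1_CACHE_BYTES = 128, and IFETCH_ALIGN_SHIFT = 4 gives
+ * IFETCH_ALIGN_BYTES = 16.
+ */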
+
+#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+
+struct ppc_cache_info {
+	u32 size;
+	u32 line_size;
+	u32 block_size;	/* L1 only */
+	u32 log_block_size;
+	u32 blocks_per_page;
+	u32 sets;
+	u32 assoc;
+};
+
+struct ppc64_caches {
+	struct ppc_cache_info l1d;
+	struct ppc_cache_info l1i;
+	struct ppc_cache_info l2;
+	struct ppc_cache_info l3;
+};
+
+extern struct ppc64_caches ppc64_caches;
+#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+#if defined(__ASSEMBLY__)
+/*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+#define PURGE_PREFETCHED_INS	\
+	sync;			\
+	icbi	0,r3;		\
+	sync;			\
+	isync
+
+#else
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR()	0L
+#define _get_L3CR()	0L
+#define _set_L2CR(val)	do { } while(0)
+#define _set_L3CR(val)	do { } while(0)
+#endif
+
+static inline void dcbz(void *addr)
+{
+	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbi(void *addr)
+{
+	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbf(void *addr)
+{
+	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void dcbst(void *addr)
+{
+	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
+}
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
new file mode 100644
index 0000000..d5a8d7b
--- /dev/null
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -0,0 +1,125 @@
+/*
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <asm/cputable.h>
+
+/*
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
+ */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_icache_page(vma, page)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Book3s has no ptesync after setting a pte, so without this ptesync it's
+ * possible for a kernel virtual mapping access to return a spurious fault
+ * if it's accessed right after the pte is set. The page fault handler does
+ * not expect this type of fault. flush_cache_vmap is not exactly the right
+ * place to put this, but it seems to work well enough.
+ */
+#define flush_cache_vmap(start, end)		do { asm volatile("ptesync" ::: "memory"); } while (0)
+#else
+#define flush_cache_vmap(start, end)		do { } while (0)
+#endif
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+extern void flush_icache_range(unsigned long, unsigned long);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+				    struct page *page, unsigned long addr,
+				    int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+	BUG();
+}
+#endif
+
+#ifdef CONFIG_PPC32
+/*
+ * Write any modified data cache blocks out to memory and invalidate them.
+ * Does not invalidate the corresponding instruction cache blocks.
+ */
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long i;
+
+	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+		dcbf(addr);
+	mb();	/* sync */
+}
+
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ */
+static inline void clean_dcache_range(unsigned long start, unsigned long stop)
+{
+	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long i;
+
+	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+		dcbst(addr);
+	mb();	/* sync */
+}
+
+/*
+ * Like above, but invalidate the D-cache.  This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ */
+static inline void invalidate_dcache_range(unsigned long start,
+					   unsigned long stop)
+{
+	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
+	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
+	unsigned long i;
+
+	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
+		dcbi(addr);
+	mb();	/* sync */
+}
+
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+#endif
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
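On cores that do not snoop the data cache (the 8xx case noted above), a
driver typically brackets DMA with these helpers: clean or flush before the
device reads memory, invalidate before the CPU reads what the device wrote.
A hedged sketch using the PPC32 variants defined in this header (buf and len
are placeholders):

	/* CPU -> device: push dirty lines to memory before starting DMA */
	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);

	/* device -> CPU: discard stale lines before reading the DMA result */
	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);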
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
new file mode 100644
index 0000000..b4b7338
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -0,0 +1,107 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author:
+ *   David Erb (djerb@us.ibm.com)
+ *   Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_CELL_PMU_H__
+#define __ASM_CELL_PMU_H__
+
+/* The Cell PMU has four hardware performance counters, which can be
+ * configured as four 32-bit counters or eight 16-bit counters.
+ */
+#define NR_PHYS_CTRS 4
+#define NR_CTRS      (NR_PHYS_CTRS * 2)
+
+/* Macros for the pm_control register. */
+#define CBE_PM_16BIT_CTR(ctr)              (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
+#define CBE_PM_ENABLE_PERF_MON             0x80000000
+#define CBE_PM_STOP_AT_MAX                 0x40000000
+#define CBE_PM_TRACE_MODE_GET(pm_control)  (((pm_control) >> 28) & 0x3)
+#define CBE_PM_TRACE_MODE_SET(mode)        (((mode)  & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit)        (((bit) & 0x1) << 17)
+#define CBE_PM_COUNT_MODE_SET(count)       (((count) & 0x3) << 18)
+#define CBE_PM_FREEZE_ALL_CTRS             0x00100000
+#define CBE_PM_ENABLE_EXT_TRACE            0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk)     (((msk) & 0x3) << 9)
+
+/* Macros for the trace_address register. */
+#define CBE_PM_TRACE_BUF_FULL              0x00000800
+#define CBE_PM_TRACE_BUF_EMPTY             0x00000400
+#define CBE_PM_TRACE_BUF_DATA_COUNT(ta)    ((ta) & 0x3ff)
+#define CBE_PM_TRACE_BUF_MAX_COUNT         0x400
+
+/* Macros for the pm07_control registers. */
+#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
+#define CBE_PM_CTR_INPUT_CONTROL           0x02000000
+#define CBE_PM_CTR_POLARITY                0x01000000
+#define CBE_PM_CTR_COUNT_CYCLES            0x00800000
+#define CBE_PM_CTR_ENABLE                  0x00400000
+#define PM07_CTR_INPUT_MUX(x)              (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)          (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)               (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)           (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)                 (((x) & 1) << 22)
+
+/* Macros for the pm_status register. */
+#define CBE_PM_CTR_OVERFLOW_INTR(ctr)      (1 << (31 - ((ctr) & 7)))
+
+enum pm_reg_name {
+	group_control,
+	debug_bus_control,
+	trace_address,
+	ext_tr_timer,
+	pm_status,
+	pm_control,
+	pm_interval,
+	pm_start_stop,
+};
+
+/* Routines for reading/writing the PMU registers. */
+extern u32  cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
+extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+extern u32  cbe_read_ctr(u32 cpu, u32 ctr);
+extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+extern u32  cbe_read_pm07_control(u32 cpu, u32 ctr);
+extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+extern u32  cbe_read_pm(u32 cpu, enum pm_reg_name reg);
+extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+extern u32  cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
+extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+extern void cbe_enable_pm(u32 cpu);
+extern void cbe_disable_pm(u32 cpu);
+
+extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
+
+extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+extern void cbe_disable_pm_interrupts(u32 cpu);
+extern u32  cbe_get_and_clear_pm_interrupts(u32 cpu);
+extern void cbe_sync_irq(int node);
+
+#define CBE_COUNT_SUPERVISOR_MODE       0
+#define CBE_COUNT_HYPERVISOR_MODE       1
+#define CBE_COUNT_PROBLEM_MODE          2
+#define CBE_COUNT_ALL_MODES             3
+
+#endif /* __ASM_CELL_PMU_H__ */
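The PM07_* setters compose a pm07_control word field by field, which the
accessors declared above then program. A sketch of enabling one counter to
count cycles (cpu, ctr and mux are placeholder values, and the bare
pm_control write is deliberately minimal):

	u32 val = PM07_CTR_INPUT_MUX(mux) |
		  PM07_CTR_COUNT_CYCLES(1) |
		  PM07_CTR_ENABLE(1);

	cbe_write_pm07_control(cpu, ctr, val);
	cbe_write_pm(cpu, pm_control, CBE_PM_ENABLE_PERF_MON);
	cbe_enable_pm(cpu);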
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
new file mode 100644
index 0000000..e1c431e
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cbe_regs.h
+ *
+ * This file is intended to hold the various register definitions for CBE
+ * on-chip system devices (memory controller, IO controller, etc...)
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          David J. Erb (djerb@us.ibm.com)
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#ifndef CBE_REGS_H
+#define CBE_REGS_H
+
+#include <asm/cell-pmu.h>
+
+/*
+ *
+ * Some HID register definitions
+ *
+ */
+
+/* CBE specific HID0 bits */
+#define HID0_CBE_THERM_WAKEUP	0x0000020000000000ul
+#define HID0_CBE_SYSERR_WAKEUP	0x0000008000000000ul
+#define HID0_CBE_THERM_INT_EN	0x0000000400000000ul
+#define HID0_CBE_SYSERR_INT_EN	0x0000000200000000ul
+
+#define MAX_CBE		2
+
+/*
+ *
+ * Pervasive unit register definitions
+ *
+ */
+
+union spe_reg {
+	u64 val;
+	u8 spe[8];
+};
+
+union ppe_spe_reg {
+	u64 val;
+	struct {
+		u32 ppe;
+		u32 spe;
+	};
+};
+
+
+struct cbe_pmd_regs {
+	/* Debug Bus Control */
+	u64	pad_0x0000;					/* 0x0000 */
+
+	u64	group_control;					/* 0x0008 */
+
+	u8	pad_0x0010_0x00a8 [0x00a8 - 0x0010];		/* 0x0010 */
+
+	u64	debug_bus_control;				/* 0x00a8 */
+
+	u8	pad_0x00b0_0x0100 [0x0100 - 0x00b0];		/* 0x00b0 */
+
+	u64	trace_aux_data;					/* 0x0100 */
+	u64	trace_buffer_0_63;				/* 0x0108 */
+	u64	trace_buffer_64_127;				/* 0x0110 */
+	u64	trace_address;					/* 0x0118 */
+	u64	ext_tr_timer;					/* 0x0120 */
+
+	u8	pad_0x0128_0x0400 [0x0400 - 0x0128];		/* 0x0128 */
+
+	/* Performance Monitor */
+	u64	pm_status;					/* 0x0400 */
+	u64	pm_control;					/* 0x0408 */
+	u64	pm_interval;					/* 0x0410 */
+	u64	pm_ctr[4];					/* 0x0418 */
+	u64	pm_start_stop;					/* 0x0438 */
+	u64	pm07_control[8];				/* 0x0440 */
+
+	u8	pad_0x0480_0x0800 [0x0800 - 0x0480];		/* 0x0480 */
+
+	/* Thermal Sensor Registers */
+	union	spe_reg	ts_ctsr1;				/* 0x0800 */
+	u64	ts_ctsr2;					/* 0x0808 */
+	union	spe_reg	ts_mtsr1;				/* 0x0810 */
+	u64	ts_mtsr2;					/* 0x0818 */
+	union	spe_reg	ts_itr1;				/* 0x0820 */
+	u64	ts_itr2;					/* 0x0828 */
+	u64	ts_gitr;					/* 0x0830 */
+	u64	ts_isr;						/* 0x0838 */
+	u64	ts_imr;						/* 0x0840 */
+	union	spe_reg	tm_cr1;					/* 0x0848 */
+	u64	tm_cr2;						/* 0x0850 */
+	u64	tm_simr;					/* 0x0858 */
+	union	ppe_spe_reg tm_tpr;				/* 0x0860 */
+	union	spe_reg	tm_str1;				/* 0x0868 */
+	u64	tm_str2;					/* 0x0870 */
+	union	ppe_spe_reg tm_tsr;				/* 0x0878 */
+
+	/* Power Management */
+	u64	pmcr;						/* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL	0x10000
+	u64	pmsr;						/* 0x0888 */
+
+	/* Time Base Register */
+	u64	tbr;						/* 0x0890 */
+
+	u8	pad_0x0898_0x0c00 [0x0c00 - 0x0898];		/* 0x0898 */
+
+	/* Fault Isolation Registers */
+	u64	checkstop_fir;					/* 0x0c00 */
+	u64	recoverable_fir;				/* 0x0c08 */
+	u64	spec_att_mchk_fir;				/* 0x0c10 */
+	u32	fir_mode_reg;					/* 0x0c18 */
+	u8	pad_0x0c1c_0x0c20 [4];				/* 0x0c1c */
+#define CBE_PMD_FIR_MODE_M8		0x00800
+	u64	fir_enable_mask;				/* 0x0c20 */
+
+	u8	pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28];		/* 0x0c28 */
+	u64	ras_esc_0;					/* 0x0ca8 */
+	u8	pad_0x0cb0_0x1000 [0x1000 - 0x0cb0];		/* 0x0cb0 */
+};
+
+extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
+extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
+
+/*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+	u32 group_control;
+	u32 debug_bus_control;
+	u32 trace_address;
+	u32 ext_tr_timer;
+	u32 pm_status;
+	u32 pm_control;
+	u32 pm_interval;
+	u32 pm_start_stop;
+	u32 pm07_control[NR_CTRS];
+
+	u32 pm_ctr[NR_PHYS_CTRS];
+	u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
+ *
+ * IIC unit register definitions
+ *
+ */
+
+struct cbe_iic_pending_bits {
+	u32 data;
+	u8 flags;
+	u8 class;
+	u8 source;
+	u8 prio;
+};
+
+#define CBE_IIC_IRQ_VALID	0x80
+#define CBE_IIC_IRQ_IPI		0x40
+
+struct cbe_iic_thread_regs {
+	struct cbe_iic_pending_bits pending;
+	struct cbe_iic_pending_bits pending_destr;
+	u64 generate;
+	u64 prio;
+};
+
+struct cbe_iic_regs {
+	u8	pad_0x0000_0x0400[0x0400 - 0x0000];		/* 0x0000 */
+
+	/* IIC interrupt registers */
+	struct	cbe_iic_thread_regs thread[2];			/* 0x0400 */
+
+	u64	iic_ir;						/* 0x0440 */
+#define CBE_IIC_IR_PRIO(x)      (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0        0x0
+#define CBE_IIC_IR_IOC_1S       0xb
+#define CBE_IIC_IR_PT_0         0xe
+#define CBE_IIC_IR_PT_1         0xf
+
+	u64	iic_is;						/* 0x0448 */
+#define CBE_IIC_IS_PMI		0x2
+
+	u8	pad_0x0450_0x0500[0x0500 - 0x0450];		/* 0x0450 */
+
+	/* IOC FIR */
+	u64	ioc_fir_reset;					/* 0x0500 */
+	u64	ioc_fir_set;					/* 0x0508 */
+	u64	ioc_checkstop_enable;				/* 0x0510 */
+	u64	ioc_fir_error_mask;				/* 0x0518 */
+	u64	ioc_syserr_enable;				/* 0x0520 */
+	u64	ioc_fir;					/* 0x0528 */
+
+	u8	pad_0x0530_0x1000[0x1000 - 0x0530];		/* 0x0530 */
+};
+
+extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
+extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+
+
+struct cbe_mic_tm_regs {
+	u8	pad_0x0000_0x0040[0x0040 - 0x0000];		/* 0x0000 */
+
+	u64	mic_ctl_cnfg2;					/* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC		0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2	0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP	0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT	0x0080000000000000LL
+
+	u64	pad_0x0048;					/* 0x0048 */
+
+	u64	mic_aux_trc_base;				/* 0x0050 */
+	u64	mic_aux_trc_max_addr;				/* 0x0058 */
+	u64	mic_aux_trc_cur_addr;				/* 0x0060 */
+	u64	mic_aux_trc_grf_addr;				/* 0x0068 */
+	u64	mic_aux_trc_grf_data;				/* 0x0070 */
+
+	u64	pad_0x0078;					/* 0x0078 */
+
+	u64	mic_ctl_cnfg_0;					/* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0	0x8000000000000000LL
+
+	u64	pad_0x0088;					/* 0x0088 */
+
+	u64	slow_fast_timer_0;				/* 0x0090 */
+	u64	slow_next_timer_0;				/* 0x0098 */
+
+	u8	pad_0x00a0_0x00f8[0x00f8 - 0x00a0];		/* 0x00a0 */
+	u64	mic_df_ecc_address_0;				/* 0x00f8 */
+
+	u8	pad_0x0100_0x01b8[0x01b8 - 0x0100];		/* 0x0100 */
+	u64	mic_df_ecc_address_1;				/* 0x01b8 */
+
+	u64	mic_ctl_cnfg_1;					/* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1	0x8000000000000000LL
+
+	u64	pad_0x01c8;					/* 0x01c8 */
+
+	u64	slow_fast_timer_1;				/* 0x01d0 */
+	u64	slow_next_timer_1;				/* 0x01d8 */
+
+	u8	pad_0x01e0_0x0208[0x0208 - 0x01e0];		/* 0x01e0 */
+	u64	mic_exc;					/* 0x0208 */
+#define CBE_MIC_EXC_BLOCK_SCRUB		0x0800000000000000ULL
+#define CBE_MIC_EXC_FAST_SCRUB		0x0100000000000000ULL
+
+	u64	mic_mnt_cfg;					/* 0x0210 */
+#define CBE_MIC_MNT_CFG_CHAN_0_POP	0x0002000000000000ULL
+#define CBE_MIC_MNT_CFG_CHAN_1_POP	0x0004000000000000ULL
+
+	u64	mic_df_config;					/* 0x0218 */
+#define CBE_MIC_ECC_DISABLE_0		0x4000000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_0	0x2000000000000000ULL
+#define CBE_MIC_ECC_DISABLE_1		0x0080000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_1	0x0040000000000000ULL
+
+	u8	pad_0x0220_0x0230[0x0230 - 0x0220];		/* 0x0220 */
+	u64	mic_fir;					/* 0x0230 */
+#define CBE_MIC_FIR_ECC_SINGLE_0_ERR	0x0200000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_ERR	0x0100000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_ERR	0x0080000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_ERR	0x0040000000000000ULL
+#define CBE_MIC_FIR_ECC_ERR_MASK	0xffff000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_CTE	0x0000020000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_CTE	0x0000010000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_CTE	0x0000008000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_CTE	0x0000004000000000ULL
+#define CBE_MIC_FIR_ECC_CTE_MASK	0x0000ffff00000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_RESET	0x0000000002000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_RESET	0x0000000001000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_RESET	0x0000000000800000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_RESET	0x0000000000400000ULL
+#define CBE_MIC_FIR_ECC_RESET_MASK	0x00000000ffff0000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_SET	0x0000000000000200ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_SET	0x0000000000000100ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_SET	0x0000000000000080ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_SET	0x0000000000000040ULL
+#define CBE_MIC_FIR_ECC_SET_MASK	0x000000000000ffffULL
+	u64	mic_fir_debug;					/* 0x0238 */
+
+	u8	pad_0x0240_0x1000[0x1000 - 0x0240];		/* 0x0240 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
+
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W		0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R		0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M		0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask	0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H		0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask	0x00000000000007fful /* ioid */
+
+/* some utility functions to deal with SMT */
+extern u32 cbe_get_hw_thread_id(int cpu);
+extern u32 cbe_cpu_to_node(int cpu);
+extern u32 cbe_node_to_cpu(int node);
+
+/* Init this module early */
+extern void cbe_regs_init(void);
+
+
+#endif /* CBE_REGS_H */
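Because the structs above encode fixed hardware offsets (tracked in the
trailing comments), a build-time layout check is a cheap safety net. A
sketch that is not in the original header, using BUILD_BUG_ON() and
offsetof() against offsets taken from the cbe_pmd_regs comments:

	static inline void cbe_pmd_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct cbe_pmd_regs, pm_status) != 0x0400);
		BUILD_BUG_ON(offsetof(struct cbe_pmd_regs, pmcr) != 0x0880);
		BUILD_BUG_ON(sizeof(struct cbe_pmd_regs) != 0x1000);
	}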
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
new file mode 100644
index 0000000..a78a57e
--- /dev/null
+++ b/arch/powerpc/include/asm/checksum.h
@@ -0,0 +1,222 @@
+#ifndef _ASM_POWERPC_CHECKSUM_H
+#define _ASM_POWERPC_CHECKSUM_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+#include <linux/bitops.h>
+#include <linux/in6.h>
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively (if that pointer is not
+ * NULL), and, for an error on src, zeroes the rest of dst.
+ *
+ * Like csum_partial, this must be called with even lengths,
+ * except for the last fragment.
+ */
+extern __wsum csum_partial_copy_generic(const void *src, void *dst,
+					      int len, __wsum sum,
+					      int *src_err, int *dst_err);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
+				      int len, __wsum sum, int *err_ptr);
+#define HAVE_CSUM_COPY_USER
+extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+				    int len, __wsum sum, int *err_ptr);
+
+#define csum_partial_copy_nocheck(src, dst, len, sum)   \
+        csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+
+
+/*
+ * turns a 32-bit partial checksum (e.g. from csum_partial) into a
+ * 1's complement 16-bit checksum.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+	unsigned int tmp;
+
+	/* swap the two 16-bit halves of sum */
+	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
+	/* if there is a carry from adding the two 16-bit halves,
+	   it will carry from the lower half into the upper half,
+	   giving us the correct sum in the upper half. */
+	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+}
+
+static inline u32 from64to32(u64 x)
+{
+	return (x + ror64(x, 32)) >> 32;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+					__u8 proto, __wsum sum)
+{
+#ifdef __powerpc64__
+	u64 s = (__force u32)sum;
+
+	s += (__force u32)saddr;
+	s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN__
+	s += proto + len;
+#else
+	s += (proto + len) << 8;
+#endif
+	return (__force __wsum) from64to32(s);
+#else
+    __asm__("\n\
+	addc %0,%0,%1 \n\
+	adde %0,%0,%2 \n\
+	adde %0,%0,%3 \n\
+	addze %0,%0 \n\
+	"
+	: "=r" (sum)
+	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
+	return sum;
+#endif
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+					__u8 proto, __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+	u64 res = (__force u64)csum;
+#endif
+	if (__builtin_constant_p(csum) && csum == 0)
+		return addend;
+	if (__builtin_constant_p(addend) && addend == 0)
+		return csum;
+
+#ifdef __powerpc64__
+	res += (__force u64)addend;
+	return (__force __wsum)((u32)res + (res >> 32));
+#else
+	asm("addc %0,%0,%1;"
+	    "addze %0,%0;"
+	    : "+r" (csum) : "r" (addend) : "xer");
+	return csum;
+#endif
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.  ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
+{
+	const u32 *ptr = (const u32 *)iph + 1;
+#ifdef __powerpc64__
+	unsigned int i;
+	u64 s = *(const u32 *)iph;
+
+	for (i = 0; i < ihl - 1; i++, ptr++)
+		s += *ptr;
+	return (__force __wsum)from64to32(s);
+#else
+	__wsum sum, tmp;
+
+	asm("mtctr %3;"
+	    "addc %0,%4,%5;"
+	    "1: lwzu %1, 4(%2);"
+	    "adde %0,%0,%1;"
+	    "bdnz 1b;"
+	    "addze %0,%0;"
+	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
+	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
+	    : "ctr", "xer", "memory");
+
+	return sum;
+#endif
+}
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return csum_fold(ip_fast_csum_nofold(iph, ihl));
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum __csum_partial(const void *buff, int len, __wsum sum);
+
+static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
+		if (len == 2)
+			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
+		if (len >= 4)
+			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
+		if (len == 6)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u16 *)(buff + 4));
+		if (len >= 8)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u32 *)(buff + 4));
+		if (len == 10)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u16 *)(buff + 8));
+		if (len >= 12)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u32 *)(buff + 8));
+		if (len == 14)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u16 *)(buff + 12));
+		if (len >= 16)
+			sum = csum_add(sum, (__force __wsum)
+					    *(const u32 *)(buff + 12));
+	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
+		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
+	} else {
+		sum = __csum_partial(buff, len, sum);
+	}
+	return sum;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			const struct in6_addr *daddr,
+			__u32 len, __u8 proto, __wsum sum);
+
+#endif
+#endif /* __KERNEL__ */
+#endif
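The rlwinm in csum_fold() is a rotate by 16, i.e. a halfword swap, so the
fold has a straightforward portable restatement; for example 0x0001f203
folds to 0x0dfb (0x0001 + 0xf203 = 0xf204, then complemented). A sketch for
illustration only:

	static inline unsigned short fold32(unsigned int sum)
	{
		unsigned int tmp = (sum << 16) | (sum >> 16); /* swap halves */

		return (unsigned short)(~(sum + tmp) >> 16);
	}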
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..2718387
--- /dev/null
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CMPXCHG_H_
+#define _ASM_POWERPC_CMPXCHG_H_
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/synch.h>
+#include <linux/bug.h>
+#include <asm/asm-405.h>
+
+#ifdef __BIG_ENDIAN
+#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
+#else
+#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
+#endif
+
+#define XCHG_GEN(type, sfx, cl)				\
+static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
+{								\
+	unsigned int prev, prev_mask, tmp, bitoff, off;		\
+								\
+	off = (unsigned long)p % sizeof(u32);			\
+	bitoff = BITOFF_CAL(sizeof(type), off);			\
+	p -= off;						\
+	val <<= bitoff;						\
+	prev_mask = (u32)(type)-1 << bitoff;			\
+								\
+	__asm__ __volatile__(					\
+"1:	lwarx   %0,0,%3\n"					\
+"	andc	%1,%0,%5\n"					\
+"	or	%1,%1,%4\n"					\
+	PPC405_ERR77(0,%3)					\
+"	stwcx.	%1,0,%3\n"					\
+"	bne-	1b\n"						\
+	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
+	: "r" (p), "r" (val), "r" (prev_mask)			\
+	: "cc", cl);						\
+								\
+	return prev >> bitoff;					\
+}
+
+#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
+static inline							\
+u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
+{								\
+	unsigned int prev, prev_mask, tmp, bitoff, off;		\
+								\
+	off = (unsigned long)p % sizeof(u32);			\
+	bitoff = BITOFF_CAL(sizeof(type), off);			\
+	p -= off;						\
+	old <<= bitoff;						\
+	new <<= bitoff;						\
+	prev_mask = (u32)(type)-1 << bitoff;			\
+								\
+	__asm__ __volatile__(					\
+	br							\
+"1:	lwarx   %0,0,%3\n"					\
+"	and	%1,%0,%6\n"					\
+"	cmpw	0,%1,%4\n"					\
+"	bne-	2f\n"						\
+"	andc	%1,%0,%6\n"					\
+"	or	%1,%1,%5\n"					\
+	PPC405_ERR77(0,%3)					\
+"	stwcx.  %1,0,%3\n"					\
+"	bne-    1b\n"						\
+	br2							\
+	"\n"							\
+"2:"								\
+	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
+	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
+	: "cc", cl);						\
+								\
+	return prev >> bitoff;					\
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*p' to be val and returns
+ * the previous value stored there.
+ */
+
+XCHG_GEN(u8, _local, "memory");
+XCHG_GEN(u8, _relaxed, "cc");
+XCHG_GEN(u16, _local, "memory");
+XCHG_GEN(u16, _relaxed, "cc");
+
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u32_relaxed(u32 *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2\n"
+	PPC405_ERR77(0, %2)
+"	stwcx.	%3,0,%2\n"
+"	bne-	1b"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (val)
+	: "cc");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stdcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_relaxed(u64 *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2\n"
+	PPC405_ERR77(0, %2)
+"	stdcx.	%3,0,%2\n"
+"	bne-	1b"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (val)
+	: "cc");
+
+	return prev;
+}
+#endif
+
+static __always_inline unsigned long
+__xchg_local(void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __xchg_u8_local(ptr, x);
+	case 2:
+		return __xchg_u16_local(ptr, x);
+	case 4:
+		return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64_local(ptr, x);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
+	return x;
+}
+
+static __always_inline unsigned long
+__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __xchg_u8_relaxed(ptr, x);
+	case 2:
+		return __xchg_u16_relaxed(ptr, x);
+	case 4:
+		return __xchg_u32_relaxed(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64_relaxed(ptr, x);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
+	return x;
+}
+#define xchg_local(ptr,x)						     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
+     		(unsigned long)_x_, sizeof(*(ptr))); 			     \
+  })
+
+#define xchg_relaxed(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
+			(unsigned long)_x_, sizeof(*(ptr)));		\
+})
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+
+CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u8, _local, , , "memory");
+CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u8, _relaxed, , , "cc");
+CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u16, _local, , , "memory");
+CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u16, _relaxed, , , "cc");
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
+"	cmpw	0,%0,%3\n"
+"	bne-	2f\n"
+	PPC405_ERR77(0, %2)
+"	stwcx.	%4,0,%2\n"
+"	bne-	1b\n"
+"2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc");
+
+	return prev;
+}
+
+/*
+ * The cmpxchg family gives no ordering guarantee when the compare part
+ * fails, so we can avoid superfluous barriers by implementing cmpxchg()
+ * and cmpxchg_acquire() in assembly. We don't do the same for
+ * cmpxchg_release(), because that would put a barrier in the middle of a
+ * ll/sc loop, which is probably a bad idea: for example, it might make
+ * the conditional store more likely to fail.
+ */
+static __always_inline unsigned long
+__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
+"	cmpw	0,%0,%3\n"
+"	bne-	2f\n"
+	PPC405_ERR77(0, %2)
+"	stwcx.	%4,0,%2\n"
+"	bne-	1b\n"
+	PPC_ACQUIRE_BARRIER
+	"\n"
+"2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	PPC_ATOMIC_EXIT_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
+"	cmpd	0,%0,%3\n"
+"	bne-	2f\n"
+"	stdcx.	%4,0,%2\n"
+"	bne-	1b\n"
+"2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
+"	cmpd	0,%0,%3\n"
+"	bne-	2f\n"
+"	stdcx.	%4,0,%2\n"
+"	bne-	1b\n"
+	PPC_ACQUIRE_BARRIER
+	"\n"
+"2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+#endif
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __cmpxchg_u8(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16(ptr, old, new);
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
+	return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __cmpxchg_u8_local(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_local(ptr, old, new);
+	case 4:
+		return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64_local(ptr, old, new);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
+	return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
+		  unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __cmpxchg_u8_relaxed(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_relaxed(ptr, old, new);
+	case 4:
+		return __cmpxchg_u32_relaxed(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64_relaxed(ptr, old, new);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
+	return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
+		  unsigned int size)
+{
+	switch (size) {
+	case 1:
+		return __cmpxchg_u8_acquire(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_acquire(ptr, old, new);
+	case 4:
+		return __cmpxchg_u32_acquire(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64_acquire(ptr, old, new);
+#endif
+	}
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
+	return old;
+}
+#define cmpxchg(ptr, o, n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+
+#define cmpxchg_local(ptr, o, n)					 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#define cmpxchg_relaxed(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
+			(unsigned long)_o_, (unsigned long)_n_,		\
+			sizeof(*(ptr)));				\
+})
+
+#define cmpxchg_acquire(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
+			(unsigned long)_o_, (unsigned long)_n_,		\
+			sizeof(*(ptr)));				\
+})
+#ifdef CONFIG_PPC64
+#define cmpxchg64(ptr, o, n)						\
+  ({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg((ptr), (o), (n));					\
+  })
+#define cmpxchg64_local(ptr, o, n)					\
+  ({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+  })
+#define cmpxchg64_relaxed(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_relaxed((ptr), (o), (n));				\
+})
+#define cmpxchg64_acquire(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_acquire((ptr), (o), (n));				\
+})
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CMPXCHG_H_ */
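As the comment in the middle of this file says, cmpxchg() returns the old
value of *p, so a failed exchange is detected by comparing the return value
against the expected one. The canonical retry loop, as a sketch (store_max
and its arguments are illustrative, not from this header):

	/* lock-free "store the maximum" built on cmpxchg() */
	static inline void store_max(unsigned long *v, unsigned long new)
	{
		unsigned long old = READ_ONCE(*v);

		while (old < new) {
			unsigned long prev = cmpxchg(v, old, new);

			if (prev == old)
				break;		/* exchange succeeded */
			old = prev;	/* raced; retry with the fresh value */
		}
	}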
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 0000000..ed7b144
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+	.pushsection ".rodata"
+	.balign 4
+	.global \name
+\name:
+	.4byte	\label - .
+	.popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
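The macro records the distance from a named word in .rodata back to the
marked instruction, which is exactly what the *_site() helpers declared in
code-patching.h consume. A hedged sketch of both halves (patch__example_site
and new_instr are hypothetical names):

	/* assembly side: mark the nop at 1: as a patchable site */
	1:	nop
		patch_site 1b, patch__example_site

	/* C side: rewrite the marked instruction later */
	extern s32 patch__example_site;

	patch_instruction_site(&patch__example_site, new_instr);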
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
new file mode 100644
index 0000000..31733a9
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -0,0 +1,169 @@
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <asm/ppc-opcode.h>
+#include <linux/string.h>
+#include <linux/kallsyms.h>
+#include <asm/asm-compat.h>
+
+/* Flags for create_branch:
+ * "b"   == create_branch(addr, target, 0);
+ * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK	0x1
+#define BRANCH_ABSOLUTE	0x2
+
+bool is_offset_in_branch_range(long offset);
+unsigned int create_branch(const unsigned int *addr,
+			   unsigned long target, int flags);
+unsigned int create_cond_branch(const unsigned int *addr,
+				unsigned long target, int flags);
+int patch_branch(unsigned int *addr, unsigned long target, int flags);
+int patch_instruction(unsigned int *addr, unsigned int instr);
+int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
+
+int instr_is_relative_branch(unsigned int instr);
+int instr_is_relative_link_branch(unsigned int instr);
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
+unsigned long branch_target(const unsigned int *instr);
+unsigned int translate_branch(const unsigned int *dest,
+			      const unsigned int *src);
+extern bool is_conditional_branch(unsigned int instr);
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+	extern unsigned int name; \
+	__patch_exception((exc), (unsigned long)&name); \
+} while (0)
+#endif
+
+#define OP_RT_RA_MASK	0xffff0000UL
+#define LIS_R2		0x3c020000UL
+#define ADDIS_R2_R12	0x3c4c0000UL
+#define ADDI_R2_R2	0x38420000UL
+
+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef PPC64_ELF_ABI_v2
+	u32 *insn = func;
+
+	/*
+	 * A PPC64 ABIv2 function may have a local and a global entry
+	 * point. We need to use the local entry point when patching
+	 * functions, so identify and step over the global entry point
+	 * sequence.
+	 *
+	 * The global entry point sequence is always of the form:
+	 *
+	 * addis r2,r12,XXXX
+	 * addi  r2,r2,XXXX
+	 *
+	 * A linker optimisation may convert the addis to lis:
+	 *
+	 * lis   r2,XXXX
+	 * addi  r2,r2,XXXX
+	 */
+	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+		return (unsigned long)(insn + 2);
+	else
+		return (unsigned long)func;
+#elif defined(PPC64_ELF_ABI_v1)
+	/*
+	 * On PPC64 ABIv1 the function pointer actually points to the
+	 * function's descriptor. The first entry in the descriptor is the
+	 * address of the function text.
+	 */
+	return ((func_descr_t *)func)->entry;
+#else
+	return (unsigned long)func;
+#endif
+}
+
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#ifdef PPC64_ELF_ABI_v2
+	/* On PPC64 ABIv2 the global entry point is at the function address */
+	return (unsigned long)func;
+#else
+	/* In all other cases there is no change vs ppc_function_entry() */
+	return ppc_function_entry(func);
+#endif
+}
+
+/*
+ * Wrapper around kallsyms_lookup() to return function entry address:
+ * - For ABIv1, we lookup the dot variant.
+ * - For ABIv2, we return the local entry point.
+ */
+static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
+{
+	unsigned long addr;
+#ifdef PPC64_ELF_ABI_v1
+	/* check for dot variant */
+	char dot_name[1 + KSYM_NAME_LEN];
+	bool dot_appended = false;
+
+	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
+		return 0;
+
+	if (name[0] != '.') {
+		dot_name[0] = '.';
+		dot_name[1] = '\0';
+		strlcat(dot_name, name, sizeof(dot_name));
+		dot_appended = true;
+	} else {
+		dot_name[0] = '\0';
+		strlcat(dot_name, name, sizeof(dot_name));
+	}
+	addr = kallsyms_lookup_name(dot_name);
+	if (!addr && dot_appended)
+		/* Let's try the original non-dot symbol lookup */
+		addr = kallsyms_lookup_name(name);
+#elif defined(PPC64_ELF_ABI_v2)
+	addr = kallsyms_lookup_name(name);
+	if (addr)
+		addr = ppc_function_entry((void *)addr);
+#else
+	addr = kallsyms_lookup_name(name);
+#endif
+	return addr;
+}
+
+#ifdef CONFIG_PPC64
+/*
+ * Some instruction encodings commonly used in dynamic ftracing
+ * and function live patching.
+ */
+
+/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
+#ifdef PPC64_ELF_ABI_v2
+#define R2_STACK_OFFSET         24
+#else
+#define R2_STACK_OFFSET         40
+#endif
+
+#define PPC_INST_LD_TOC		(PPC_INST_LD  | ___PPC_RT(__REG_R2) | \
+				 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+
+/* usually preceded by a mflr r0 */
+#define PPC_INST_STD_LR		(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
+				 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
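Tying the API together: redirecting an existing instruction to call a
function is a single patch_branch() call. A sketch (addr and my_func are
placeholders; BRANCH_SET_LINK makes the branch a "bl", per the flag table at
the top of this file, and ppc_function_entry() resolves the correct entry
point on both ABIs):

	int err = patch_branch(addr, ppc_function_entry(my_func),
			       BRANCH_SET_LINK);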
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
new file mode 100644
index 0000000..85c8af2
--- /dev/null
+++ b/arch/powerpc/include/asm/compat.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_COMPAT_H
+#define _ASM_POWERPC_COMPAT_H
+#ifdef __KERNEL__
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#define COMPAT_USER_HZ		100
+#ifdef __BIG_ENDIAN__
+#define COMPAT_UTS_MACHINE	"ppc\0\0"
+#else
+#define COMPAT_UTS_MACHINE	"ppcle\0\0"
+#endif
+
+typedef u32		compat_size_t;
+typedef s32		compat_ssize_t;
+typedef s32		compat_clock_t;
+typedef s32		compat_pid_t;
+typedef u32		__compat_uid_t;
+typedef u32		__compat_gid_t;
+typedef u32		__compat_uid32_t;
+typedef u32		__compat_gid32_t;
+typedef u32		compat_mode_t;
+typedef u32		compat_ino_t;
+typedef u32		compat_dev_t;
+typedef s32		compat_off_t;
+typedef s64		compat_loff_t;
+typedef s16		compat_nlink_t;
+typedef u16		compat_ipc_pid_t;
+typedef s32		compat_daddr_t;
+typedef u32		compat_caddr_t;
+typedef __kernel_fsid_t	compat_fsid_t;
+typedef s32		compat_key_t;
+typedef s32		compat_timer_t;
+
+typedef s32		compat_int_t;
+typedef s32		compat_long_t;
+typedef s64		compat_s64;
+typedef u32		compat_uint_t;
+typedef u32		compat_ulong_t;
+typedef u64		compat_u64;
+typedef u32		compat_uptr_t;
+
+struct compat_stat {
+	compat_dev_t	st_dev;
+	compat_ino_t	st_ino;
+	compat_mode_t	st_mode;
+	compat_nlink_t	st_nlink;
+	__compat_uid32_t	st_uid;
+	__compat_gid32_t	st_gid;
+	compat_dev_t	st_rdev;
+	compat_off_t	st_size;
+	compat_off_t	st_blksize;
+	compat_off_t	st_blocks;
+	compat_time_t	st_atime;
+	u32		st_atime_nsec;
+	compat_time_t	st_mtime;
+	u32		st_mtime_nsec;
+	compat_time_t	st_ctime;
+	u32		st_ctime_nsec;
+	u32		__unused4[2];
+};
+
+struct compat_flock {
+	short		l_type;
+	short		l_whence;
+	compat_off_t	l_start;
+	compat_off_t	l_len;
+	compat_pid_t	l_pid;
+};
+
+#define F_GETLK64	12	/*  using 'struct flock64' */
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
+struct compat_flock64 {
+	short		l_type;
+	short		l_whence;
+	compat_loff_t	l_start;
+	compat_loff_t	l_len;
+	compat_pid_t	l_pid;
+};
+
+struct compat_statfs {
+	int		f_type;
+	int		f_bsize;
+	int		f_blocks;
+	int		f_bfree;
+	int		f_bavail;
+	int		f_files;
+	int		f_ffree;
+	compat_fsid_t	f_fsid;
+	int		f_namelen;	/* SunOS ignores this field. */
+	int		f_frsize;
+	int		f_flags;
+	int		f_spare[4];
+};
+
+#define COMPAT_RLIM_INFINITY		0xffffffff
+
+typedef u32		compat_old_sigset_t;
+
+#define _COMPAT_NSIG		64
+#define _COMPAT_NSIG_BPW	32
+
+typedef u32		compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX	0x7fffffff
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters; just declare those
+ * as pointers, because the syscall entry code will
+ * already have converted them appropriately.
+ */
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+	return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+	struct pt_regs *regs = current->thread.regs;
+	unsigned long usp = regs->gpr[1];
+
+	/*
+	 * We can't access below the stack pointer in the 32bit ABI and
+	 * can access 288 bytes in the 64bit big-endian ABI,
+	 * or 512 bytes with the new ELFv2 little-endian ABI.
+	 */
+	if (!is_32bit_task())
+		usp -= USER_REDZONE_SIZE;
+
+	return (void __user *) (usp - len);
+}
+
+/*
+ * ipc64_perm is actually 32/64bit clean, but since the compat layer refers
+ * to it, we may as well define it.
+ */
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid_t uid;
+	__compat_gid_t gid;
+	__compat_uid_t cuid;
+	__compat_gid_t cgid;
+	compat_mode_t mode;
+	unsigned int seq;
+	unsigned int __pad2;
+	unsigned long __unused1;	/* yes they really are 64bit pads */
+	unsigned long __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	unsigned int sem_otime_high;
+	unsigned int sem_otime;
+	unsigned int sem_ctime_high;
+	unsigned int sem_ctime;
+	compat_ulong_t sem_nsems;
+	compat_ulong_t __unused3;
+	compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+	unsigned int msg_stime_high;
+	unsigned int msg_stime;
+	unsigned int msg_rtime_high;
+	unsigned int msg_rtime;
+	unsigned int msg_ctime_high;
+	unsigned int msg_ctime;
+	compat_ulong_t msg_cbytes;
+	compat_ulong_t msg_qnum;
+	compat_ulong_t msg_qbytes;
+	compat_pid_t msg_lspid;
+	compat_pid_t msg_lrpid;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	unsigned int shm_atime_high;
+	unsigned int shm_atime;
+	unsigned int shm_dtime_high;
+	unsigned int shm_dtime;
+	unsigned int shm_ctime_high;
+	unsigned int shm_ctime;
+	unsigned int __unused4;
+	compat_size_t shm_segsz;
+	compat_pid_t shm_cpid;
+	compat_pid_t shm_lpid;
+	compat_ulong_t shm_nattch;
+	compat_ulong_t __unused5;
+	compat_ulong_t __unused6;
+};
+
+static inline int is_compat_task(void)
+{
+	return is_32bit_task();
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_COMPAT_H */
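compat_ptr() is the widening direction: a 32-bit user pointer that arrived
as a plain u32, for instance read out of a compat structure, must go through
it before any uaccess call. A sketch (the helper and its arguments are
illustrative, and copy_from_user() comes from <linux/uaccess.h>):

	static int fetch_compat_buf(compat_uptr_t uarg, void *dst, size_t len)
	{
		void __user *uptr = compat_ptr(uarg);	/* u32 -> user pointer */

		return copy_from_user(dst, uptr, len) ? -EFAULT : 0;
	}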
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 0000000..f2682b2
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl	schedule_user
+#else
+#define SCHEDULE_USER bl	schedule
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
new file mode 100644
index 0000000..48616fe
--- /dev/null
+++ b/arch/powerpc/include/asm/copro.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_COPRO_H
+#define _ASM_POWERPC_COPRO_H
+
+#include <linux/mm_types.h>
+
+struct copro_slb
+{
+	u64 esid, vsid;
+};
+
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+			  unsigned long dsisr, vm_fault_t *flt);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
+
+#ifdef CONFIG_PPC_COPRO_BASE
+void copro_flush_all_slbs(struct mm_struct *mm);
+#else
+static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
+#endif
+#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
new file mode 100644
index 0000000..4c24ea8
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CPM_H
+#define __CPM_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <soc/fsl/qe/qe.h>
+
+/*
+ * SPI Parameter RAM common to QE and CPM.
+ */
+struct spi_pram {
+	__be16	rbase;	/* Rx Buffer descriptor base address */
+	__be16	tbase;	/* Tx Buffer descriptor base address */
+	u8	rfcr;	/* Rx function code */
+	u8	tfcr;	/* Tx function code */
+	__be16	mrblr;	/* Max receive buffer length */
+	__be32	rstate;	/* Internal */
+	__be32	rdp;	/* Internal */
+	__be16	rbptr;	/* Internal */
+	__be16	rbc;	/* Internal */
+	__be32	rxtmp;	/* Internal */
+	__be32	tstate;	/* Internal */
+	__be32	tdp;	/* Internal */
+	__be16	tbptr;	/* Internal */
+	__be16	tbc;	/* Internal */
+	__be32	txtmp;	/* Internal */
+	__be32	res;	/* Tx temp. */
+	__be16  rpbase;	/* Relocation pointer (CPM1 only) */
+	__be16	res1;	/* Reserved */
+};
+
+/*
+ * USB Controller pram common to QE and CPM.
+ */
+struct usb_ctlr {
+	u8	usb_usmod;
+	u8	usb_usadr;
+	u8	usb_uscom;
+	u8	res1[1];
+	__be16	usb_usep[4];
+	u8	res2[4];
+	__be16	usb_usber;
+	u8	res3[2];
+	__be16	usb_usbmr;
+	u8	res4[1];
+	u8	usb_usbs;
+	/* Fields down below are QE-only */
+	__be16	usb_ussft;
+	u8	res5[2];
+	__be16	usb_usfrn;
+	u8	res6[0x22];
+} __attribute__ ((packed));
+
+/*
+ * Function code bits, usually generic to devices.
+ */
+#ifdef CONFIG_CPM1
+#define CPMFCR_GBL	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_TC2	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_DTB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_BDB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#else
+#define CPMFCR_GBL	((u_char)0x20)	/* Set memory snooping */
+#define CPMFCR_TC2	((u_char)0x04)	/* Transfer code 2 value */
+#define CPMFCR_DTB	((u_char)0x02)	/* Use local bus for data when set */
+#define CPMFCR_BDB	((u_char)0x01)	/* Use local bus for BD when set */
+#endif
+#define CPMFCR_EB	((u_char)0x10)	/* Set big endian byte order */
+
+/* Opcodes common to CPM1 and CPM2
+*/
+#define CPM_CR_INIT_TRX		((ushort)0x0000)
+#define CPM_CR_INIT_RX		((ushort)0x0001)
+#define CPM_CR_INIT_TX		((ushort)0x0002)
+#define CPM_CR_HUNT_MODE	((ushort)0x0003)
+#define CPM_CR_STOP_TX		((ushort)0x0004)
+#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
+#define CPM_CR_RESTART_TX	((ushort)0x0006)
+#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
+#define CPM_CR_SET_GADDR	((ushort)0x0008)
+#define CPM_CR_SET_TIMER	((ushort)0x0008)
+#define CPM_CR_STOP_IDMA	((ushort)0x000b)
+
+/* Buffer descriptors used by many of the CPM protocols. */
+typedef struct cpm_buf_desc {
+	ushort	cbd_sc;		/* Status and Control */
+	ushort	cbd_datlen;	/* Data length in buffer */
+	uint	cbd_bufaddr;	/* Buffer address in host memory */
+} cbd_t;
+
+/* Buffer descriptor control/status used by serial
+ */
+
+#define BD_SC_EMPTY	(0x8000)	/* Receive is empty */
+#define BD_SC_READY	(0x8000)	/* Transmit is ready */
+#define BD_SC_WRAP	(0x2000)	/* Last buffer descriptor */
+#define BD_SC_INTRPT	(0x1000)	/* Interrupt on change */
+#define BD_SC_LAST	(0x0800)	/* Last buffer in frame */
+#define BD_SC_TC	(0x0400)	/* Transmit CRC */
+#define BD_SC_CM	(0x0200)	/* Continuous mode */
+#define BD_SC_ID	(0x0100)	/* Rec'd too many idles */
+#define BD_SC_P		(0x0100)	/* xmt preamble */
+#define BD_SC_BR	(0x0020)	/* Break received */
+#define BD_SC_FR	(0x0010)	/* Framing error */
+#define BD_SC_PR	(0x0008)	/* Parity error */
+#define BD_SC_NAK	(0x0004)	/* NAK - did not respond */
+#define BD_SC_OV	(0x0002)	/* Overrun */
+#define BD_SC_UN	(0x0002)	/* Underrun */
+#define BD_SC_CD	(0x0001)	/* */
+#define BD_SC_CL	(0x0001)	/* Collision */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_RX_EMPTY	(0x8000)
+#define BD_ENET_RX_WRAP		(0x2000)
+#define BD_ENET_RX_INTR		(0x1000)
+#define BD_ENET_RX_LAST		(0x0800)
+#define BD_ENET_RX_FIRST	(0x0400)
+#define BD_ENET_RX_MISS		(0x0100)
+#define BD_ENET_RX_BC		(0x0080)	/* FCC Only */
+#define BD_ENET_RX_MC		(0x0040)	/* FCC Only */
+#define BD_ENET_RX_LG		(0x0020)
+#define BD_ENET_RX_NO		(0x0010)
+#define BD_ENET_RX_SH		(0x0008)
+#define BD_ENET_RX_CR		(0x0004)
+#define BD_ENET_RX_OV		(0x0002)
+#define BD_ENET_RX_CL		(0x0001)
+#define BD_ENET_RX_STATS	(0x01ff)	/* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_TX_READY	(0x8000)
+#define BD_ENET_TX_PAD		(0x4000)
+#define BD_ENET_TX_WRAP		(0x2000)
+#define BD_ENET_TX_INTR		(0x1000)
+#define BD_ENET_TX_LAST		(0x0800)
+#define BD_ENET_TX_TC		(0x0400)
+#define BD_ENET_TX_DEF		(0x0200)
+#define BD_ENET_TX_HB		(0x0100)
+#define BD_ENET_TX_LC		(0x0080)
+#define BD_ENET_TX_RL		(0x0040)
+#define BD_ENET_TX_RCMASK	(0x003c)
+#define BD_ENET_TX_UN		(0x0002)
+#define BD_ENET_TX_CSL		(0x0001)
+#define BD_ENET_TX_STATS	(0x03ff)	/* All status bits */
+
+/* Buffer descriptor control/status used by Transparent mode SCC.
+ */
+#define BD_SCC_TX_LAST		(0x0800)
+
+/* Buffer descriptor control/status used by I2C.
+ */
+#define BD_I2C_START		(0x0400)
+
+#ifdef CONFIG_CPM
+int cpm_command(u32 command, u8 opcode);
+#else
+static inline int cpm_command(u32 command, u8 opcode)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_CPM */
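+
+/* An illustrative call (chan_bits is hypothetical): "command" carries
+ * the channel bits on CPM1 or the page/sub-block bits on CPM2, and
+ * "opcode" is one of the CPM_CR_* opcodes above; cpm_command() sets
+ * CPM_CR_FLG and waits for the CP to clear it:
+ *
+ *	if (cpm_command(chan_bits, CPM_CR_INIT_TRX))
+ *		pr_err("CPM command timed out\n");
+ */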
+
+int cpm2_gpiochip_add32(struct device *dev);
+
+#endif
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
new file mode 100644
index 0000000..a116fe9
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MPC8xx Communication Processor Module.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This file contains structures and information for the communication
+ * processor channels.  Some CPM control and status is available
+ * through the MPC8xx internal memory map.  See immap.h for details.
+ * This file only contains what I need for the moment, not the total
+ * CPM capabilities.  I (or someone else) will add definitions as they
+ * are needed.  -- Dan
+ *
+ * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
+ * bytes of the DP RAM and relocates the I2C parameter area to the
+ * IDMA1 space.  The remaining DP RAM is available for buffer descriptors
+ * or other use.
+ */
+#ifndef __CPM1__
+#define __CPM1__
+
+#include <linux/init.h>
+#include <asm/8xx_immap.h>
+#include <asm/ptrace.h>
+#include <asm/cpm.h>
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST	((ushort)0x8000)
+#define CPM_CR_OPCODE	((ushort)0x0f00)
+#define CPM_CR_CHAN	((ushort)0x00f0)
+#define CPM_CR_FLG	((ushort)0x0001)
+
+/* Channel numbers.
+*/
+#define CPM_CR_CH_SCC1		((ushort)0x0000)
+#define CPM_CR_CH_I2C		((ushort)0x0001)	/* I2C and IDMA1 */
+#define CPM_CR_CH_SCC2		((ushort)0x0004)
+#define CPM_CR_CH_SPI		((ushort)0x0005)	/* SPI / IDMA2 / Timers */
+#define CPM_CR_CH_TIMER		CPM_CR_CH_SPI
+#define CPM_CR_CH_SCC3		((ushort)0x0008)
+#define CPM_CR_CH_SMC1		((ushort)0x0009)	/* SMC1 / DSP1 */
+#define CPM_CR_CH_SCC4		((ushort)0x000c)
+#define CPM_CR_CH_SMC2		((ushort)0x000d)	/* SMC2 / DSP2 */
+
+#define mk_cr_cmd(CH, CMD)	(((CMD) << 8) | ((CH) << 4))
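+/* For example, mk_cr_cmd(CPM_CR_CH_SMC1, CPM_CR_INIT_TRX) evaluates to
+ * 0x0090; the caller then ORs in CPM_CR_FLG when writing the command
+ * register and polls until the CP clears it.
+ */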
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
+#define cpm_dpram_phys cpm_muram_dma
+
+extern void cpm_setbrg(uint brg, uint rate);
+
+extern void __init cpm_load_patch(cpm8xx_t *cp);
+
+extern void cpm_reset(void);
+
+/* Parameter RAM offsets.
+*/
+#define PROFF_SCC1	((uint)0x0000)
+#define PROFF_IIC	((uint)0x0080)
+#define PROFF_SCC2	((uint)0x0100)
+#define PROFF_SPI	((uint)0x0180)
+#define PROFF_SCC3	((uint)0x0200)
+#define PROFF_SMC1	((uint)0x0280)
+#define PROFF_SCC4	((uint)0x0300)
+#define PROFF_SMC2	((uint)0x0380)
+
+/* Define enough so I can at least use the serial port as a UART.
+ * The MBX uses SMC1 as the host serial port.
+ */
+typedef struct smc_uart {
+	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
+	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
+	u_char	smc_rfcr;	/* Rx function code */
+	u_char	smc_tfcr;	/* Tx function code */
+	ushort	smc_mrblr;	/* Max receive buffer length */
+	uint	smc_rstate;	/* Internal */
+	uint	smc_idp;	/* Internal */
+	ushort	smc_rbptr;	/* Internal */
+	ushort	smc_ibc;	/* Internal */
+	uint	smc_rxtmp;	/* Internal */
+	uint	smc_tstate;	/* Internal */
+	uint	smc_tdp;	/* Internal */
+	ushort	smc_tbptr;	/* Internal */
+	ushort	smc_tbc;	/* Internal */
+	uint	smc_txtmp;	/* Internal */
+	ushort	smc_maxidl;	/* Maximum idle characters */
+	ushort	smc_tmpidl;	/* Temporary idle counter */
+	ushort	smc_brklen;	/* Last received break length */
+	ushort	smc_brkec;	/* rcv'd break condition counter */
+	ushort	smc_brkcr;	/* xmt break count register */
+	ushort	smc_rmask;	/* Temporary bit mask */
+	char	res1[8];	/* Reserved */
+	ushort	smc_rpbase;	/* Relocation pointer */
+} smc_uart_t;
+
+/* Function code bits.
+*/
+#define SMC_EB	((u_char)0x10)	/* Set big endian byte order */
+
+/* SMC uart mode register.
+*/
+#define	SMCMR_REN	((ushort)0x0001)
+#define SMCMR_TEN	((ushort)0x0002)
+#define SMCMR_DM	((ushort)0x000c)
+#define SMCMR_SM_GCI	((ushort)0x0000)
+#define SMCMR_SM_UART	((ushort)0x0020)
+#define SMCMR_SM_TRANS	((ushort)0x0030)
+#define SMCMR_SM_MASK	((ushort)0x0030)
+#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
+#define SMCMR_REVD	SMCMR_PM_EVEN
+#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
+#define SMCMR_BS	SMCMR_PEN
+#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
+#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
+#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
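+/* For example, smcr_mk_clen(9) evaluates to 0x4800; the argument is
+ * the total character length in bits (start + data + parity + stop)
+ * minus one, so 9 covers a 10-bit 8N1 frame.
+ */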
+
+/* SMC2 as Centronics parallel printer.  It is half duplex, in that
+ * it can only receive or transmit at any one time.  The parameter ram
+ * values for each direction are either unique or properly overlap, so
+ * we can include them in one structure.
+ */
+typedef struct smc_centronics {
+	ushort	scent_rbase;
+	ushort	scent_tbase;
+	u_char	scent_cfcr;
+	u_char	scent_smask;
+	ushort	scent_mrblr;
+	uint	scent_rstate;
+	uint	scent_r_ptr;
+	ushort	scent_rbptr;
+	ushort	scent_r_cnt;
+	uint	scent_rtemp;
+	uint	scent_tstate;
+	uint	scent_t_ptr;
+	ushort	scent_tbptr;
+	ushort	scent_t_cnt;
+	uint	scent_ttemp;
+	ushort	scent_max_sl;
+	ushort	scent_sl_cnt;
+	ushort	scent_character1;
+	ushort	scent_character2;
+	ushort	scent_character3;
+	ushort	scent_character4;
+	ushort	scent_character5;
+	ushort	scent_character6;
+	ushort	scent_character7;
+	ushort	scent_character8;
+	ushort	scent_rccm;
+	ushort	scent_rccr;
+} smc_cent_t;
+
+/* Centronics Status Mask Register.
+*/
+#define SMC_CENT_F	((u_char)0x08)
+#define SMC_CENT_PE	((u_char)0x04)
+#define SMC_CENT_S	((u_char)0x02)
+
+/* SMC Event and Mask register.
+*/
+#define	SMCM_BRKE	((unsigned char)0x40)	/* When in UART Mode */
+#define	SMCM_BRK	((unsigned char)0x10)	/* When in UART Mode */
+#define	SMCM_TXE	((unsigned char)0x10)	/* When in Transparent Mode */
+#define	SMCM_BSY	((unsigned char)0x04)
+#define	SMCM_TX		((unsigned char)0x02)
+#define	SMCM_RX		((unsigned char)0x01)
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST		((uint)0x00020000)
+#define CPM_BRG_EN		((uint)0x00010000)
+#define CPM_BRG_EXTC_INT	((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK2	((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK6	((uint)0x00008000)
+#define CPM_BRG_ATB		((uint)0x00002000)
+#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
+#define CPM_BRG_DIV16		((uint)0x00000001)
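+
+/* The generated clock is BRGCLK / (CD + 1), where CD is the divider in
+ * the CPM_BRG_CD_MASK field, with a further divide by 16 when
+ * CPM_BRG_DIV16 is set; cpm_setbrg() above derives these settings from
+ * a requested baud rate.
+ */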
+
+/* SI Clock Route Register
+*/
+#define SICR_RCLK_SCC1_BRG1	((uint)0x00000000)
+#define SICR_TCLK_SCC1_BRG1	((uint)0x00000000)
+#define SICR_RCLK_SCC2_BRG2	((uint)0x00000800)
+#define SICR_TCLK_SCC2_BRG2	((uint)0x00000100)
+#define SICR_RCLK_SCC3_BRG3	((uint)0x00100000)
+#define SICR_TCLK_SCC3_BRG3	((uint)0x00020000)
+#define SICR_RCLK_SCC4_BRG4	((uint)0x18000000)
+#define SICR_TCLK_SCC4_BRG4	((uint)0x03000000)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP		((uint)0x00040000)
+#define SCC_GSMRH_GDE		((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
+#define SCC_GSMRH_REVD		((uint)0x00002000)
+#define SCC_GSMRH_TRX		((uint)0x00001000)
+#define SCC_GSMRH_TTX		((uint)0x00000800)
+#define SCC_GSMRH_CDP		((uint)0x00000400)
+#define SCC_GSMRH_CTSP		((uint)0x00000200)
+#define SCC_GSMRH_CDS		((uint)0x00000100)
+#define SCC_GSMRH_CTSS		((uint)0x00000080)
+#define SCC_GSMRH_TFL		((uint)0x00000040)
+#define SCC_GSMRH_RFW		((uint)0x00000020)
+#define SCC_GSMRH_TXSY		((uint)0x00000010)
+#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8		((uint)0x00000008)
+#define SCC_GSMRH_SYNL4		((uint)0x00000004)
+#define SCC_GSMRH_RTSM		((uint)0x00000002)
+#define SCC_GSMRH_RSYN		((uint)0x00000001)
+
+#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
+#define SCC_GSMRL_TCI		((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
+#define SCC_GSMRL_RINV		((uint)0x02000000)
+#define SCC_GSMRL_TINV		((uint)0x01000000)
+#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48	((uint)0x00800000)
+#define SCC_GSMRL_TPL_32	((uint)0x00600000)
+#define SCC_GSMRL_TPL_16	((uint)0x00400000)
+#define SCC_GSMRL_TPL_8		((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
+#define SCC_GSMRL_TPP_01	((uint)0x00100000)
+#define SCC_GSMRL_TPP_10	((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
+#define SCC_GSMRL_TEND		((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
+#define SCC_GSMRL_ENR		((uint)0x00000020)
+#define SCC_GSMRL_ENT		((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
+#define SCC_GSMRL_MODE_QMC	((uint)0x0000000a)
+#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
+
+#define SCC_TODR_TOD		((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define	SCCM_TXE	((unsigned char)0x10)
+#define	SCCM_BSY	((unsigned char)0x04)
+#define	SCCM_TX		((unsigned char)0x02)
+#define	SCCM_RX		((unsigned char)0x01)
+
+typedef struct scc_param {
+	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
+	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
+	u_char	scc_rfcr;	/* Rx function code */
+	u_char	scc_tfcr;	/* Tx function code */
+	ushort	scc_mrblr;	/* Max receive buffer length */
+	uint	scc_rstate;	/* Internal */
+	uint	scc_idp;	/* Internal */
+	ushort	scc_rbptr;	/* Internal */
+	ushort	scc_ibc;	/* Internal */
+	uint	scc_rxtmp;	/* Internal */
+	uint	scc_tstate;	/* Internal */
+	uint	scc_tdp;	/* Internal */
+	ushort	scc_tbptr;	/* Internal */
+	ushort	scc_tbc;	/* Internal */
+	uint	scc_txtmp;	/* Internal */
+	uint	scc_rcrc;	/* Internal */
+	uint	scc_tcrc;	/* Internal */
+} sccp_t;
+
+/* Function code bits.
+*/
+#define SCC_EB	((u_char)0x10)	/* Set big endian byte order */
+
+/* CPM Ethernet through SCCx.
+ */
+typedef struct scc_enet {
+	sccp_t	sen_genscc;
+	uint	sen_cpres;	/* Preset CRC */
+	uint	sen_cmask;	/* Constant mask for CRC */
+	uint	sen_crcec;	/* CRC Error counter */
+	uint	sen_alec;	/* alignment error counter */
+	uint	sen_disfc;	/* discard frame counter */
+	ushort	sen_pads;	/* Tx short frame pad character */
+	ushort	sen_retlim;	/* Retry limit threshold */
+	ushort	sen_retcnt;	/* Retry limit counter */
+	ushort	sen_maxflr;	/* maximum frame length register */
+	ushort	sen_minflr;	/* minimum frame length register */
+	ushort	sen_maxd1;	/* maximum DMA1 length */
+	ushort	sen_maxd2;	/* maximum DMA2 length */
+	ushort	sen_maxd;	/* Rx max DMA */
+	ushort	sen_dmacnt;	/* Rx DMA counter */
+	ushort	sen_maxb;	/* Max BD byte count */
+	ushort	sen_gaddr1;	/* Group address filter */
+	ushort	sen_gaddr2;
+	ushort	sen_gaddr3;
+	ushort	sen_gaddr4;
+	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
+	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
+	uint	sen_tbuf0rba;	/* Internal */
+	uint	sen_tbuf0crc;	/* Internal */
+	ushort	sen_tbuf0bcnt;	/* Internal */
+	ushort	sen_paddrh;	/* physical address (MSB) */
+	ushort	sen_paddrm;
+	ushort	sen_paddrl;	/* physical address (LSB) */
+	ushort	sen_pper;	/* persistence */
+	ushort	sen_rfbdptr;	/* Rx first BD pointer */
+	ushort	sen_tfbdptr;	/* Tx first BD pointer */
+	ushort	sen_tlbdptr;	/* Tx last BD pointer */
+	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
+	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
+	uint	sen_tbuf1rba;	/* Internal */
+	uint	sen_tbuf1crc;	/* Internal */
+	ushort	sen_tbuf1bcnt;	/* Internal */
+	ushort	sen_txlen;	/* Tx Frame length counter */
+	ushort	sen_iaddr1;	/* Individual address filter */
+	ushort	sen_iaddr2;
+	ushort	sen_iaddr3;
+	ushort	sen_iaddr4;
+	ushort	sen_boffcnt;	/* Backoff counter */
+
+	/* NOTE: Some versions of the manual have the following items
+	 * incorrectly documented.  Below is the proper order.
+	 */
+	ushort	sen_taddrh;	/* temp address (MSB) */
+	ushort	sen_taddrm;
+	ushort	sen_taddrl;	/* temp address (LSB) */
+} scc_enet_t;
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
+#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
+#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
+#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
+#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
+#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
+#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
+#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
+#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
+#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
+#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
+#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
+#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
+#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
+#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
+#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
+#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
+#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+	sccp_t	scc_genscc;
+	char	res1[8];	/* Reserved */
+	ushort	scc_maxidl;	/* Maximum idle chars */
+	ushort	scc_idlc;	/* temp idle counter */
+	ushort	scc_brkcr;	/* Break count register */
+	ushort	scc_parec;	/* receive parity error counter */
+	ushort	scc_frmec;	/* receive framing error counter */
+	ushort	scc_nosec;	/* receive noise counter */
+	ushort	scc_brkec;	/* receive break condition counter */
+	ushort	scc_brkln;	/* last received break length */
+	ushort	scc_uaddr1;	/* UART address character 1 */
+	ushort	scc_uaddr2;	/* UART address character 2 */
+	ushort	scc_rtemp;	/* Temp storage */
+	ushort	scc_toseq;	/* Transmit out of sequence char */
+	ushort	scc_char1;	/* control character 1 */
+	ushort	scc_char2;	/* control character 2 */
+	ushort	scc_char3;	/* control character 3 */
+	ushort	scc_char4;	/* control character 4 */
+	ushort	scc_char5;	/* control character 5 */
+	ushort	scc_char6;	/* control character 6 */
+	ushort	scc_char7;	/* control character 7 */
+	ushort	scc_char8;	/* control character 8 */
+	ushort	scc_rccm;	/* receive control character mask */
+	ushort	scc_rccr;	/* receive control character register */
+	ushort	scc_rlbc;	/* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR		((ushort)0x1000)
+#define UART_SCCM_GLT		((ushort)0x0800)
+#define UART_SCCM_AB		((ushort)0x0200)
+#define UART_SCCM_IDL		((ushort)0x0100)
+#define UART_SCCM_GRA		((ushort)0x0080)
+#define UART_SCCM_BRKE		((ushort)0x0040)
+#define UART_SCCM_BRKS		((ushort)0x0020)
+#define UART_SCCM_CCR		((ushort)0x0008)
+#define UART_SCCM_BSY		((ushort)0x0004)
+#define UART_SCCM_TX		((ushort)0x0002)
+#define UART_SCCM_RX		((ushort)0x0001)
+
+/* The SCC PMSR when used as a UART.
+*/
+#define SCU_PSMR_FLC		((ushort)0x8000)
+#define SCU_PSMR_SL		((ushort)0x4000)
+#define SCU_PSMR_CL		((ushort)0x3000)
+#define SCU_PSMR_UM		((ushort)0x0c00)
+#define SCU_PSMR_FRZ		((ushort)0x0200)
+#define SCU_PSMR_RZS		((ushort)0x0100)
+#define SCU_PSMR_SYN		((ushort)0x0080)
+#define SCU_PSMR_DRT		((ushort)0x0040)
+#define SCU_PSMR_PEN		((ushort)0x0010)
+#define SCU_PSMR_RPM		((ushort)0x000c)
+#define SCU_PSMR_REVP		((ushort)0x0008)
+#define SCU_PSMR_TPM		((ushort)0x0003)
+#define SCU_PSMR_TEVP		((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+	sccp_t	st_genscc;
+	uint	st_cpres;	/* Preset CRC */
+	uint	st_cmask;	/* Constant mask for CRC */
+} scc_trans_t;
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
+	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
+	u_char	iic_rfcr;	/* Rx function code */
+	u_char	iic_tfcr;	/* Tx function code */
+	ushort	iic_mrblr;	/* Max receive buffer length */
+	uint	iic_rstate;	/* Internal */
+	uint	iic_rdp;	/* Internal */
+	ushort	iic_rbptr;	/* Internal */
+	ushort	iic_rbc;	/* Internal */
+	uint	iic_rxtmp;	/* Internal */
+	uint	iic_tstate;	/* Internal */
+	uint	iic_tdp;	/* Internal */
+	ushort	iic_tbptr;	/* Internal */
+	ushort	iic_tbc;	/* Internal */
+	uint	iic_txtmp;	/* Internal */
+	char	res1[4];	/* Reserved */
+	ushort	iic_rpbase;	/* Relocation pointer */
+	char	res2[2];	/* Reserved */
+} iic_t;
+
+/*
+ * RISC Controller Configuration Register definitions
+ */
+#define RCCR_TIME	0x8000			/* RISC Timer Enable */
+#define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
+#define RCCR_TIME_MASK	0x00FF			/* not RISC Timer related bits */
+
+/* RISC Timer Parameter RAM offset */
+#define PROFF_RTMR	((uint)0x01B0)
+
+typedef struct risc_timer_pram {
+	unsigned short	tm_base;	/* RISC Timer Table Base Address */
+	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
+	unsigned short	r_tmr;		/* RISC Timer Mode Register */
+	unsigned short	r_tmv;		/* RISC Timer Valid Register */
+	unsigned long	tm_cmd;		/* RISC Timer Command Register */
+	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
+} rt_pram_t;
+
+/* Bits in RISC Timer Command Register */
+#define TM_CMD_VALID	0x80000000	/* Valid - Enables the timer */
+#define TM_CMD_RESTART	0x40000000	/* Restart - for automatic restart */
+#define TM_CMD_PWM	0x20000000	/* Run in Pulse Width Modulation Mode */
+#define TM_CMD_NUM(n)	(((n)&0xF)<<16)	/* Timer Number */
+#define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
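+
+/* For example, TM_CMD_VALID | TM_CMD_RESTART | TM_CMD_NUM(3) |
+ * TM_CMD_PERIOD(1000) evaluates to 0xc00303e8: enable RISC timer 3
+ * with automatic restart and a period of 1000 ticks.
+ */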
+
+/* CPM interrupts.  There are nearly 32 interrupts generated by CPM
+ * channels or devices.  All of these are presented to the PPC core
+ * as a single interrupt.  The CPM interrupt handler dispatches its
+ * own handlers, in a similar fashion to the PPC core handler.  We
+ * use the table as defined in the manuals (i.e. no special high
+ * priority and SCC1 == SCCa, etc...).
+ */
+#define CPMVEC_NR		32
+#define	CPMVEC_PIO_PC15		((ushort)0x1f)
+#define	CPMVEC_SCC1		((ushort)0x1e)
+#define	CPMVEC_SCC2		((ushort)0x1d)
+#define	CPMVEC_SCC3		((ushort)0x1c)
+#define	CPMVEC_SCC4		((ushort)0x1b)
+#define	CPMVEC_PIO_PC14		((ushort)0x1a)
+#define	CPMVEC_TIMER1		((ushort)0x19)
+#define	CPMVEC_PIO_PC13		((ushort)0x18)
+#define	CPMVEC_PIO_PC12		((ushort)0x17)
+#define	CPMVEC_SDMA_CB_ERR	((ushort)0x16)
+#define CPMVEC_IDMA1		((ushort)0x15)
+#define CPMVEC_IDMA2		((ushort)0x14)
+#define CPMVEC_TIMER2		((ushort)0x12)
+#define CPMVEC_RISCTIMER	((ushort)0x11)
+#define CPMVEC_I2C		((ushort)0x10)
+#define	CPMVEC_PIO_PC11		((ushort)0x0f)
+#define	CPMVEC_PIO_PC10		((ushort)0x0e)
+#define CPMVEC_TIMER3		((ushort)0x0c)
+#define	CPMVEC_PIO_PC9		((ushort)0x0b)
+#define	CPMVEC_PIO_PC8		((ushort)0x0a)
+#define	CPMVEC_PIO_PC7		((ushort)0x09)
+#define CPMVEC_TIMER4		((ushort)0x07)
+#define	CPMVEC_PIO_PC6		((ushort)0x06)
+#define	CPMVEC_SPI		((ushort)0x05)
+#define	CPMVEC_SMC1		((ushort)0x04)
+#define	CPMVEC_SMC2		((ushort)0x03)
+#define	CPMVEC_PIO_PC5		((ushort)0x02)
+#define	CPMVEC_PIO_PC4		((ushort)0x01)
+#define	CPMVEC_ERROR		((ushort)0x00)
+
+/* CPM interrupt configuration vector.
+*/
+#define	CICR_SCD_SCC4		((uint)0x00c00000)	/* SCC4 @ SCCd */
+#define	CICR_SCC_SCC3		((uint)0x00200000)	/* SCC3 @ SCCc */
+#define	CICR_SCB_SCC2		((uint)0x00040000)	/* SCC2 @ SCCb */
+#define	CICR_SCA_SCC1		((uint)0x00000000)	/* SCC1 @ SCCa */
+#define CICR_IRL_MASK		((uint)0x0000e000)	/* Core interrupt */
+#define CICR_HP_MASK		((uint)0x00001f00)	/* Hi-pri int. */
+#define CICR_IEN		((uint)0x00000080)	/* Int. enable */
+#define CICR_SPS		((uint)0x00000001)	/* SCC Spread */
+
+#define CPM_PIN_INPUT     0
+#define CPM_PIN_OUTPUT    1
+#define CPM_PIN_PRIMARY   0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO      4
+#define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE  16
+#define CPM_PIN_ANYEDGE   0
+
+enum cpm_port {
+	CPM_PORTA,
+	CPM_PORTB,
+	CPM_PORTC,
+	CPM_PORTD,
+	CPM_PORTE,
+};
+
+void cpm1_set_pin(enum cpm_port port, int pin, int flags);
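+
+/* An illustrative call (the pin assignment is board specific):
+ * cpm1_set_pin(CPM_PORTA, 4, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY)
+ * hands a port A pin to its secondary on-chip function as an output.
+ */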
+
+enum cpm_clk_dir {
+	CPM_CLK_RX,
+	CPM_CLK_TX,
+	CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+	CPM_CLK_SCC1,
+	CPM_CLK_SCC2,
+	CPM_CLK_SCC3,
+	CPM_CLK_SCC4,
+	CPM_CLK_SMC1,
+	CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+	CPM_BRG1,	/* Baud Rate Generator  1 */
+	CPM_BRG2,	/* Baud Rate Generator  2 */
+	CPM_BRG3,	/* Baud Rate Generator  3 */
+	CPM_BRG4,	/* Baud Rate Generator  4 */
+	CPM_CLK1,	/* Clock  1 */
+	CPM_CLK2,	/* Clock  2 */
+	CPM_CLK3,	/* Clock  3 */
+	CPM_CLK4,	/* Clock  4 */
+	CPM_CLK5,	/* Clock  5 */
+	CPM_CLK6,	/* Clock  6 */
+	CPM_CLK7,	/* Clock  7 */
+	CPM_CLK8,	/* Clock  8 */
+};
+
+int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);
+int cpm1_gpiochip_add16(struct device *dev);
+int cpm1_gpiochip_add32(struct device *dev);
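+
+/* An illustrative call (actual clock routing is board specific):
+ * cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX) routes BRG1 to
+ * SMC1 for both the receive and transmit directions.
+ */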
+
+#endif /* __CPM1__ */
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
new file mode 100644
index 0000000..2211b93
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -0,0 +1,1149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Communication Processor Module v2.
+ *
+ * This file contains structures and information for the communication
+ * processor channels found in the dual port RAM or parameter RAM.
+ * All CPM control and status is available through the CPM2 internal
+ * memory map.  See immap_cpm2.h for details.
+ */
+#ifdef __KERNEL__
+#ifndef __CPM2__
+#define __CPM2__
+
+#include <asm/immap_cpm2.h>
+#include <asm/cpm.h>
+#include <sysdev/fsl_soc.h>
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST	((uint)0x80000000)
+#define CPM_CR_PAGE	((uint)0x7c000000)
+#define CPM_CR_SBLOCK	((uint)0x03e00000)
+#define CPM_CR_FLG	((uint)0x00010000)
+#define CPM_CR_MCN	((uint)0x00003fc0)
+#define CPM_CR_OPCODE	((uint)0x0000000f)
+
+/* Device sub-block and page codes.
+*/
+#define CPM_CR_SCC1_SBLOCK	(0x04)
+#define CPM_CR_SCC2_SBLOCK	(0x05)
+#define CPM_CR_SCC3_SBLOCK	(0x06)
+#define CPM_CR_SCC4_SBLOCK	(0x07)
+#define CPM_CR_SMC1_SBLOCK	(0x08)
+#define CPM_CR_SMC2_SBLOCK	(0x09)
+#define CPM_CR_SPI_SBLOCK	(0x0a)
+#define CPM_CR_I2C_SBLOCK	(0x0b)
+#define CPM_CR_TIMER_SBLOCK	(0x0f)
+#define CPM_CR_RAND_SBLOCK	(0x0e)
+#define CPM_CR_FCC1_SBLOCK	(0x10)
+#define CPM_CR_FCC2_SBLOCK	(0x11)
+#define CPM_CR_FCC3_SBLOCK	(0x12)
+#define CPM_CR_IDMA1_SBLOCK	(0x14)
+#define CPM_CR_IDMA2_SBLOCK	(0x15)
+#define CPM_CR_IDMA3_SBLOCK	(0x16)
+#define CPM_CR_IDMA4_SBLOCK	(0x17)
+#define CPM_CR_MCC1_SBLOCK	(0x1c)
+
+#define CPM_CR_FCC_SBLOCK(x)	((x) + 0x10)
+
+#define CPM_CR_SCC1_PAGE	(0x00)
+#define CPM_CR_SCC2_PAGE	(0x01)
+#define CPM_CR_SCC3_PAGE	(0x02)
+#define CPM_CR_SCC4_PAGE	(0x03)
+#define CPM_CR_SMC1_PAGE	(0x07)
+#define CPM_CR_SMC2_PAGE	(0x08)
+#define CPM_CR_SPI_PAGE		(0x09)
+#define CPM_CR_I2C_PAGE		(0x0a)
+#define CPM_CR_TIMER_PAGE	(0x0a)
+#define CPM_CR_RAND_PAGE	(0x0a)
+#define CPM_CR_FCC1_PAGE	(0x04)
+#define CPM_CR_FCC2_PAGE	(0x05)
+#define CPM_CR_FCC3_PAGE	(0x06)
+#define CPM_CR_IDMA1_PAGE	(0x07)
+#define CPM_CR_IDMA2_PAGE	(0x08)
+#define CPM_CR_IDMA3_PAGE	(0x09)
+#define CPM_CR_IDMA4_PAGE	(0x0a)
+#define CPM_CR_MCC1_PAGE	(0x07)
+#define CPM_CR_MCC2_PAGE	(0x08)
+
+#define CPM_CR_FCC_PAGE(x)	((x) + 0x04)
+
+/* CPM2-specific opcodes (see cpm.h for common opcodes)
+*/
+#define CPM_CR_START_IDMA	((ushort)0x0009)
+
+#define mk_cr_cmd(PG, SBC, MCN, OP) \
+	(((PG) << 26) | ((SBC) << 21) | ((MCN) << 6) | (OP))
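+/* For example, mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0,
+ * CPM_CR_INIT_TRX) evaluates to 0x00800000; as on CPM1, CPM_CR_FLG is
+ * ORed in when the command register is written.
+ */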
+
+/* The number of pages of host memory we allocate for CPM.  This is
+ * done early in kernel initialization to get physically contiguous
+ * pages.
+ */
+#define NUM_CPM_HOST_PAGES	2
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
+
+extern void cpm2_reset(void);
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST		((uint)0x00020000)
+#define CPM_BRG_EN		((uint)0x00010000)
+#define CPM_BRG_EXTC_INT	((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK3_9	((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK5_15	((uint)0x00008000)
+#define CPM_BRG_ATB		((uint)0x00002000)
+#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
+#define CPM_BRG_DIV16		((uint)0x00000001)
+
+#define CPM2_BRG_INT_CLK	(get_brgfreq())
+#define CPM2_BRG_UART_CLK	(CPM2_BRG_INT_CLK/16)
+
+extern void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src);
+
+/* This function is used by UARTS, or anything else that uses a 16x
+ * oversampled clock.
+ */
+static inline void cpm_setbrg(uint brg, uint rate)
+{
+	__cpm2_setbrg(brg, rate, CPM2_BRG_UART_CLK, 0, CPM_BRG_EXTC_INT);
+}
+
+/* This function is used to set high speed synchronous baud rate
+ * clocks.
+ */
+static inline void cpm2_fastbrg(uint brg, uint rate, int div16)
+{
+	__cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT);
+}
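+
+/* For example, cpm_setbrg(0, 9600) programs the first BRG for a
+ * 9600 baud, 16x oversampled UART clock; cpm2_fastbrg() suits
+ * synchronous protocols clocked at the full bit rate (illustrative;
+ * the brg argument is a zero-based generator index).
+ */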
+
+/* Parameter RAM offsets from the base.
+*/
+#define PROFF_SCC1		((uint)0x8000)
+#define PROFF_SCC2		((uint)0x8100)
+#define PROFF_SCC3		((uint)0x8200)
+#define PROFF_SCC4		((uint)0x8300)
+#define PROFF_FCC1		((uint)0x8400)
+#define PROFF_FCC2		((uint)0x8500)
+#define PROFF_FCC3		((uint)0x8600)
+#define PROFF_MCC1		((uint)0x8700)
+#define PROFF_SMC1_BASE		((uint)0x87fc)
+#define PROFF_IDMA1_BASE	((uint)0x87fe)
+#define PROFF_MCC2		((uint)0x8800)
+#define PROFF_SMC2_BASE		((uint)0x88fc)
+#define PROFF_IDMA2_BASE	((uint)0x88fe)
+#define PROFF_SPI_BASE		((uint)0x89fc)
+#define PROFF_IDMA3_BASE	((uint)0x89fe)
+#define PROFF_TIMERS		((uint)0x8ae0)
+#define PROFF_REVNUM		((uint)0x8af0)
+#define PROFF_RAND		((uint)0x8af8)
+#define PROFF_I2C_BASE		((uint)0x8afc)
+#define PROFF_IDMA4_BASE	((uint)0x8afe)
+
+#define PROFF_SCC_SIZE		((uint)0x100)
+#define PROFF_FCC_SIZE		((uint)0x100)
+#define PROFF_SMC_SIZE		((uint)64)
+
+/* The SMCs are relocated to any of the first eight DPRAM pages.
+ * We will fix these at the first locations of DPRAM, until we
+ * get some microcode patches :-).
+ * The parameter ram space for the SMCs is fifty-some bytes, and
+ * they are required to start on a 64 byte boundary.
+ */
+#define PROFF_SMC1	(0)
+#define PROFF_SMC2	(64)
+
+
+/* Define enough so I can at least use the serial port as a UART.
+ */
+typedef struct smc_uart {
+	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
+	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
+	u_char	smc_rfcr;	/* Rx function code */
+	u_char	smc_tfcr;	/* Tx function code */
+	ushort	smc_mrblr;	/* Max receive buffer length */
+	uint	smc_rstate;	/* Internal */
+	uint	smc_idp;	/* Internal */
+	ushort	smc_rbptr;	/* Internal */
+	ushort	smc_ibc;	/* Internal */
+	uint	smc_rxtmp;	/* Internal */
+	uint	smc_tstate;	/* Internal */
+	uint	smc_tdp;	/* Internal */
+	ushort	smc_tbptr;	/* Internal */
+	ushort	smc_tbc;	/* Internal */
+	uint	smc_txtmp;	/* Internal */
+	ushort	smc_maxidl;	/* Maximum idle characters */
+	ushort	smc_tmpidl;	/* Temporary idle counter */
+	ushort	smc_brklen;	/* Last received break length */
+	ushort	smc_brkec;	/* rcv'd break condition counter */
+	ushort	smc_brkcr;	/* xmt break count register */
+	ushort	smc_rmask;	/* Temporary bit mask */
+	uint	smc_stmp;	/* SDMA Temp */
+} smc_uart_t;
+
+/* SMC uart mode register (Internal memory map).
+*/
+#define SMCMR_REN	((ushort)0x0001)
+#define SMCMR_TEN	((ushort)0x0002)
+#define SMCMR_DM	((ushort)0x000c)
+#define SMCMR_SM_GCI	((ushort)0x0000)
+#define SMCMR_SM_UART	((ushort)0x0020)
+#define SMCMR_SM_TRANS	((ushort)0x0030)
+#define SMCMR_SM_MASK	((ushort)0x0030)
+#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
+#define SMCMR_REVD	SMCMR_PM_EVEN
+#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
+#define SMCMR_BS	SMCMR_PEN
+#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
+#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
+#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
+
+/* SMC Event and Mask register.
+*/
+#define SMCM_BRKE       ((unsigned char)0x40)   /* When in UART Mode */
+#define SMCM_BRK        ((unsigned char)0x10)   /* When in UART Mode */
+#define SMCM_TXE	((unsigned char)0x10)
+#define SMCM_BSY	((unsigned char)0x04)
+#define SMCM_TX		((unsigned char)0x02)
+#define SMCM_RX		((unsigned char)0x01)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP		((uint)0x00040000)
+#define SCC_GSMRH_GDE		((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
+#define SCC_GSMRH_REVD		((uint)0x00002000)
+#define SCC_GSMRH_TRX		((uint)0x00001000)
+#define SCC_GSMRH_TTX		((uint)0x00000800)
+#define SCC_GSMRH_CDP		((uint)0x00000400)
+#define SCC_GSMRH_CTSP		((uint)0x00000200)
+#define SCC_GSMRH_CDS		((uint)0x00000100)
+#define SCC_GSMRH_CTSS		((uint)0x00000080)
+#define SCC_GSMRH_TFL		((uint)0x00000040)
+#define SCC_GSMRH_RFW		((uint)0x00000020)
+#define SCC_GSMRH_TXSY		((uint)0x00000010)
+#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8		((uint)0x00000008)
+#define SCC_GSMRH_SYNL4		((uint)0x00000004)
+#define SCC_GSMRH_RTSM		((uint)0x00000002)
+#define SCC_GSMRH_RSYN		((uint)0x00000001)
+
+#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
+#define SCC_GSMRL_TCI		((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
+#define SCC_GSMRL_RINV		((uint)0x02000000)
+#define SCC_GSMRL_TINV		((uint)0x01000000)
+#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48	((uint)0x00800000)
+#define SCC_GSMRL_TPL_32	((uint)0x00600000)
+#define SCC_GSMRL_TPL_16	((uint)0x00400000)
+#define SCC_GSMRL_TPL_8		((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
+#define SCC_GSMRL_TPP_01	((uint)0x00100000)
+#define SCC_GSMRL_TPP_10	((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
+#define SCC_GSMRL_TEND		((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
+#define SCC_GSMRL_ENR		((uint)0x00000020)
+#define SCC_GSMRL_ENT		((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
+#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
+
+#define SCC_TODR_TOD		((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define SCCM_TXE	((unsigned char)0x10)
+#define SCCM_BSY	((unsigned char)0x04)
+#define SCCM_TX		((unsigned char)0x02)
+#define SCCM_RX		((unsigned char)0x01)
+
+typedef struct scc_param {
+	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
+	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
+	u_char	scc_rfcr;	/* Rx function code */
+	u_char	scc_tfcr;	/* Tx function code */
+	ushort	scc_mrblr;	/* Max receive buffer length */
+	uint	scc_rstate;	/* Internal */
+	uint	scc_idp;	/* Internal */
+	ushort	scc_rbptr;	/* Internal */
+	ushort	scc_ibc;	/* Internal */
+	uint	scc_rxtmp;	/* Internal */
+	uint	scc_tstate;	/* Internal */
+	uint	scc_tdp;	/* Internal */
+	ushort	scc_tbptr;	/* Internal */
+	ushort	scc_tbc;	/* Internal */
+	uint	scc_txtmp;	/* Internal */
+	uint	scc_rcrc;	/* Internal */
+	uint	scc_tcrc;	/* Internal */
+} sccp_t;
+
+/* Function code bits.
+*/
+#define SCC_EB	((u_char) 0x10)	/* Set big endian byte order */
+#define SCC_GBL	((u_char) 0x20) /* Snooping enabled */
+
+/* CPM Ethernet through SCC1.
+ */
+typedef struct scc_enet {
+	sccp_t	sen_genscc;
+	uint	sen_cpres;	/* Preset CRC */
+	uint	sen_cmask;	/* Constant mask for CRC */
+	uint	sen_crcec;	/* CRC Error counter */
+	uint	sen_alec;	/* alignment error counter */
+	uint	sen_disfc;	/* discard frame counter */
+	ushort	sen_pads;	/* Tx short frame pad character */
+	ushort	sen_retlim;	/* Retry limit threshold */
+	ushort	sen_retcnt;	/* Retry limit counter */
+	ushort	sen_maxflr;	/* maximum frame length register */
+	ushort	sen_minflr;	/* minimum frame length register */
+	ushort	sen_maxd1;	/* maximum DMA1 length */
+	ushort	sen_maxd2;	/* maximum DMA2 length */
+	ushort	sen_maxd;	/* Rx max DMA */
+	ushort	sen_dmacnt;	/* Rx DMA counter */
+	ushort	sen_maxb;	/* Max BD byte count */
+	ushort	sen_gaddr1;	/* Group address filter */
+	ushort	sen_gaddr2;
+	ushort	sen_gaddr3;
+	ushort	sen_gaddr4;
+	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
+	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
+	uint	sen_tbuf0rba;	/* Internal */
+	uint	sen_tbuf0crc;	/* Internal */
+	ushort	sen_tbuf0bcnt;	/* Internal */
+	ushort	sen_paddrh;	/* physical address (MSB) */
+	ushort	sen_paddrm;
+	ushort	sen_paddrl;	/* physical address (LSB) */
+	ushort	sen_pper;	/* persistence */
+	ushort	sen_rfbdptr;	/* Rx first BD pointer */
+	ushort	sen_tfbdptr;	/* Tx first BD pointer */
+	ushort	sen_tlbdptr;	/* Tx last BD pointer */
+	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
+	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
+	uint	sen_tbuf1rba;	/* Internal */
+	uint	sen_tbuf1crc;	/* Internal */
+	ushort	sen_tbuf1bcnt;	/* Internal */
+	ushort	sen_txlen;	/* Tx Frame length counter */
+	ushort	sen_iaddr1;	/* Individual address filter */
+	ushort	sen_iaddr2;
+	ushort	sen_iaddr3;
+	ushort	sen_iaddr4;
+	ushort	sen_boffcnt;	/* Backoff counter */
+
+	/* NOTE: Some versions of the manual have the following items
+	 * incorrectly documented.  Below is the proper order.
+	 */
+	ushort	sen_taddrh;	/* temp address (MSB) */
+	ushort	sen_taddrm;
+	ushort	sen_taddrl;	/* temp address (LSB) */
+} scc_enet_t;
+
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
+#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
+#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
+#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
+#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
+#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
+#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
+#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
+#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
+#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
+#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
+#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
+#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
+#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
+#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
+#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
+#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
+#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+	sccp_t	scc_genscc;
+	uint	scc_res1;	/* Reserved */
+	uint	scc_res2;	/* Reserved */
+	ushort	scc_maxidl;	/* Maximum idle chars */
+	ushort	scc_idlc;	/* temp idle counter */
+	ushort	scc_brkcr;	/* Break count register */
+	ushort	scc_parec;	/* receive parity error counter */
+	ushort	scc_frmec;	/* receive framing error counter */
+	ushort	scc_nosec;	/* receive noise counter */
+	ushort	scc_brkec;	/* receive break condition counter */
+	ushort	scc_brkln;	/* last received break length */
+	ushort	scc_uaddr1;	/* UART address character 1 */
+	ushort	scc_uaddr2;	/* UART address character 2 */
+	ushort	scc_rtemp;	/* Temp storage */
+	ushort	scc_toseq;	/* Transmit out of sequence char */
+	ushort	scc_char1;	/* control character 1 */
+	ushort	scc_char2;	/* control character 2 */
+	ushort	scc_char3;	/* control character 3 */
+	ushort	scc_char4;	/* control character 4 */
+	ushort	scc_char5;	/* control character 5 */
+	ushort	scc_char6;	/* control character 6 */
+	ushort	scc_char7;	/* control character 7 */
+	ushort	scc_char8;	/* control character 8 */
+	ushort	scc_rccm;	/* receive control character mask */
+	ushort	scc_rccr;	/* receive control character register */
+	ushort	scc_rlbc;	/* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR		((ushort)0x1000)
+#define UART_SCCM_GLT		((ushort)0x0800)
+#define UART_SCCM_AB		((ushort)0x0200)
+#define UART_SCCM_IDL		((ushort)0x0100)
+#define UART_SCCM_GRA		((ushort)0x0080)
+#define UART_SCCM_BRKE		((ushort)0x0040)
+#define UART_SCCM_BRKS		((ushort)0x0020)
+#define UART_SCCM_CCR		((ushort)0x0008)
+#define UART_SCCM_BSY		((ushort)0x0004)
+#define UART_SCCM_TX		((ushort)0x0002)
+#define UART_SCCM_RX		((ushort)0x0001)
+
+/* The SCC PSMR when used as a UART.
+*/
+#define SCU_PSMR_FLC		((ushort)0x8000)
+#define SCU_PSMR_SL		((ushort)0x4000)
+#define SCU_PSMR_CL		((ushort)0x3000)
+#define SCU_PSMR_UM		((ushort)0x0c00)
+#define SCU_PSMR_FRZ		((ushort)0x0200)
+#define SCU_PSMR_RZS		((ushort)0x0100)
+#define SCU_PSMR_SYN		((ushort)0x0080)
+#define SCU_PSMR_DRT		((ushort)0x0040)
+#define SCU_PSMR_PEN		((ushort)0x0010)
+#define SCU_PSMR_RPM		((ushort)0x000c)
+#define SCU_PSMR_REVP		((ushort)0x0008)
+#define SCU_PSMR_TPM		((ushort)0x0003)
+#define SCU_PSMR_TEVP		((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+	sccp_t	st_genscc;
+	uint	st_cpres;	/* Preset CRC */
+	uint	st_cmask;	/* Constant mask for CRC */
+} scc_trans_t;
+
+/* How about some FCCs.....
+*/
+#define FCC_GFMR_DIAG_NORM	((uint)0x00000000)
+#define FCC_GFMR_DIAG_LE	((uint)0x40000000)
+#define FCC_GFMR_DIAG_AE	((uint)0x80000000)
+#define FCC_GFMR_DIAG_ALE	((uint)0xc0000000)
+#define FCC_GFMR_TCI		((uint)0x20000000)
+#define FCC_GFMR_TRX		((uint)0x10000000)
+#define FCC_GFMR_TTX		((uint)0x08000000)
+#define FCC_GFMR_CDP		((uint)0x04000000)
+#define FCC_GFMR_CTSP		((uint)0x02000000)
+#define FCC_GFMR_CDS		((uint)0x01000000)
+#define FCC_GFMR_CTSS		((uint)0x00800000)
+#define FCC_GFMR_SYNL_NONE	((uint)0x00000000)
+#define FCC_GFMR_SYNL_AUTO	((uint)0x00004000)
+#define FCC_GFMR_SYNL_8		((uint)0x00008000)
+#define FCC_GFMR_SYNL_16	((uint)0x0000c000)
+#define FCC_GFMR_RTSM		((uint)0x00002000)
+#define FCC_GFMR_RENC_NRZ	((uint)0x00000000)
+#define FCC_GFMR_RENC_NRZI	((uint)0x00000800)
+#define FCC_GFMR_REVD		((uint)0x00000400)
+#define FCC_GFMR_TENC_NRZ	((uint)0x00000000)
+#define FCC_GFMR_TENC_NRZI	((uint)0x00000100)
+#define FCC_GFMR_TCRC_16	((uint)0x00000000)
+#define FCC_GFMR_TCRC_32	((uint)0x00000080)
+#define FCC_GFMR_ENR		((uint)0x00000020)
+#define FCC_GFMR_ENT		((uint)0x00000010)
+#define FCC_GFMR_MODE_ENET	((uint)0x0000000c)
+#define FCC_GFMR_MODE_ATM	((uint)0x0000000a)
+#define FCC_GFMR_MODE_HDLC	((uint)0x00000000)
+
+/* Generic FCC parameter ram.
+*/
+typedef struct fcc_param {
+	ushort	fcc_riptr;	/* Rx Internal temp pointer */
+	ushort	fcc_tiptr;	/* Tx Internal temp pointer */
+	ushort	fcc_res1;
+	ushort	fcc_mrblr;	/* Max receive buffer length, mod 32 bytes */
+	uint	fcc_rstate;	/* Upper byte is Func code, must be set */
+	uint	fcc_rbase;	/* Receive BD base */
+	ushort	fcc_rbdstat;	/* RxBD status */
+	ushort	fcc_rbdlen;	/* RxBD down counter */
+	uint	fcc_rdptr;	/* RxBD internal data pointer */
+	uint	fcc_tstate;	/* Upper byte is Func code, must be set */
+	uint	fcc_tbase;	/* Transmit BD base */
+	ushort	fcc_tbdstat;	/* TxBD status */
+	ushort	fcc_tbdlen;	/* TxBD down counter */
+	uint	fcc_tdptr;	/* TxBD internal data pointer */
+	uint	fcc_rbptr;	/* Rx BD Internal buf pointer */
+	uint	fcc_tbptr;	/* Tx BD Internal buf pointer */
+	uint	fcc_rcrc;	/* Rx temp CRC */
+	uint	fcc_res2;
+	uint	fcc_tcrc;	/* Tx temp CRC */
+} fccp_t;
+
+
+/* Ethernet controller through FCC.
+*/
+typedef struct fcc_enet {
+	fccp_t	fen_genfcc;
+	uint	fen_statbuf;	/* Internal status buffer */
+	uint	fen_camptr;	/* CAM address */
+	uint	fen_cmask;	/* Constant mask for CRC */
+	uint	fen_cpres;	/* Preset CRC */
+	uint	fen_crcec;	/* CRC Error counter */
+	uint	fen_alec;	/* alignment error counter */
+	uint	fen_disfc;	/* discard frame counter */
+	ushort	fen_retlim;	/* Retry limit */
+	ushort	fen_retcnt;	/* Retry counter */
+	ushort	fen_pper;	/* Persistence */
+	ushort	fen_boffcnt;	/* backoff counter */
+	uint	fen_gaddrh;	/* Group address filter, high 32-bits */
+	uint	fen_gaddrl;	/* Group address filter, low 32-bits */
+	ushort	fen_tfcstat;	/* out of sequence TxBD */
+	ushort	fen_tfclen;
+	uint	fen_tfcptr;
+	ushort	fen_mflr;	/* Maximum frame length (1518) */
+	ushort	fen_paddrh;	/* MAC address */
+	ushort	fen_paddrm;
+	ushort	fen_paddrl;
+	ushort	fen_ibdcount;	/* Internal BD counter */
+	ushort	fen_ibdstart;	/* Internal BD start pointer */
+	ushort	fen_ibdend;	/* Internal BD end pointer */
+	ushort	fen_txlen;	/* Internal Tx frame length counter */
+	uint	fen_ibdbase[8]; /* Internal use */
+	uint	fen_iaddrh;	/* Individual address filter */
+	uint	fen_iaddrl;
+	ushort	fen_minflr;	/* Minimum frame length (64) */
+	ushort	fen_taddrh;	/* Filter transfer MAC address */
+	ushort	fen_taddrm;
+	ushort	fen_taddrl;
+	ushort	fen_padptr;	/* Pointer to pad byte buffer */
+	ushort	fen_cftype;	/* control frame type */
+	ushort	fen_cfrange;	/* control frame range */
+	ushort	fen_maxb;	/* maximum BD count */
+	ushort	fen_maxd1;	/* Max DMA1 length (1520) */
+	ushort	fen_maxd2;	/* Max DMA2 length (1520) */
+	ushort	fen_maxd;	/* internal max DMA count */
+	ushort	fen_dmacnt;	/* internal DMA counter */
+	uint	fen_octc;	/* Total octet counter */
+	uint	fen_colc;	/* Total collision counter */
+	uint	fen_broc;	/* Total broadcast packet counter */
+	uint	fen_mulc;	/* Total multicast packet count */
+	uint	fen_uspc;	/* Total packets < 64 bytes */
+	uint	fen_frgc;	/* Total packets < 64 bytes with errors */
+	uint	fen_ospc;	/* Total packets > 1518 */
+	uint	fen_jbrc;	/* Total packets > 1518 with errors */
+	uint	fen_p64c;	/* Total packets == 64 bytes */
+	uint	fen_p65c;	/* Total packets 64 < bytes <= 127 */
+	uint	fen_p128c;	/* Total packets 127 < bytes <= 255 */
+	uint	fen_p256c;	/* Total packets 256 < bytes <= 511 */
+	uint	fen_p512c;	/* Total packets 512 < bytes <= 1023 */
+	uint	fen_p1024c;	/* Total packets 1024 < bytes <= 1518 */
+	uint	fen_cambuf;	/* Internal CAM buffer pointer */
+	ushort	fen_rfthr;	/* Received frames threshold */
+	ushort	fen_rfcnt;	/* Received frames count */
+} fcc_enet_t;
+
+/* FCC Event/Mask register as used by Ethernet.
+*/
+#define FCC_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
+#define FCC_ENET_RXC	((ushort)0x0040)	/* Control Frame Received */
+#define FCC_ENET_TXC	((ushort)0x0020)	/* Out of seq. Tx sent */
+#define FCC_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
+#define FCC_ENET_RXF	((ushort)0x0008)	/* Full frame received */
+#define FCC_ENET_BSY	((ushort)0x0004)	/* Busy.  Rx Frame dropped */
+#define FCC_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
+#define FCC_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
+
+/* FCC Mode Register (FPSMR) as used by Ethernet.
+*/
+#define FCC_PSMR_HBC	((uint)0x80000000)	/* Enable heartbeat */
+#define FCC_PSMR_FC	((uint)0x40000000)	/* Force Collision */
+#define FCC_PSMR_SBT	((uint)0x20000000)	/* Stop backoff timer */
+#define FCC_PSMR_LPB	((uint)0x10000000)	/* Local protect. 1 = FDX */
+#define FCC_PSMR_LCW	((uint)0x08000000)	/* Late collision select */
+#define FCC_PSMR_FDE	((uint)0x04000000)	/* Full Duplex Enable */
+#define FCC_PSMR_MON	((uint)0x02000000)	/* RMON Enable */
+#define FCC_PSMR_PRO	((uint)0x00400000)	/* Promiscuous Enable */
+#define FCC_PSMR_FCE	((uint)0x00200000)	/* Flow Control Enable */
+#define FCC_PSMR_RSH	((uint)0x00100000)	/* Receive Short Frames */
+#define FCC_PSMR_CAM	((uint)0x00000400)	/* CAM enable */
+#define FCC_PSMR_BRO	((uint)0x00000200)	/* Broadcast pkt discard */
+#define FCC_PSMR_ENCRC	((uint)0x00000080)	/* Use 32-bit CRC */
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
+	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
+	u_char	iic_rfcr;	/* Rx function code */
+	u_char	iic_tfcr;	/* Tx function code */
+	ushort	iic_mrblr;	/* Max receive buffer length */
+	uint	iic_rstate;	/* Internal */
+	uint	iic_rdp;	/* Internal */
+	ushort	iic_rbptr;	/* Internal */
+	ushort	iic_rbc;	/* Internal */
+	uint	iic_rxtmp;	/* Internal */
+	uint	iic_tstate;	/* Internal */
+	uint	iic_tdp;	/* Internal */
+	ushort	iic_tbptr;	/* Internal */
+	ushort	iic_tbc;	/* Internal */
+	uint	iic_txtmp;	/* Internal */
+} iic_t;
+
+/* IDMA parameter RAM
+*/
+typedef struct idma {
+	ushort ibase;		/* IDMA buffer descriptor table base address */
+	ushort dcm;		/* DMA channel mode */
+	ushort ibdptr;		/* IDMA current buffer descriptor pointer */
+	ushort dpr_buf;		/* IDMA transfer buffer base address */
+	ushort buf_inv;		/* internal buffer inventory */
+	ushort ss_max;		/* steady-state maximum transfer size */
+	ushort dpr_in_ptr;	/* write pointer inside the internal buffer */
+	ushort sts;		/* source transfer size */
+	ushort dpr_out_ptr;	/* read pointer inside the internal buffer */
+	ushort seob;		/* source end of burst */
+	ushort deob;		/* destination end of burst */
+	ushort dts;		/* destination transfer size */
+	ushort ret_add;		/* return address when working in ERM=1 mode */
+	ushort res0;		/* reserved */
+	uint   bd_cnt;		/* internal byte count */
+	uint   s_ptr;		/* source internal data pointer */
+	uint   d_ptr;		/* destination internal data pointer */
+	uint   istate;		/* internal state */
+	u_char res1[20];	/* pad to 64-byte length */
+} idma_t;
+
+/* DMA channel mode bit fields
+*/
+#define IDMA_DCM_FB		((ushort)0x8000) /* fly-by mode */
+#define IDMA_DCM_LP		((ushort)0x4000) /* low priority */
+#define IDMA_DCM_TC2		((ushort)0x0400) /* value driven on TC[2] */
+#define IDMA_DCM_DMA_WRAP_MASK	((ushort)0x01c0) /* mask for DMA wrap */
+#define IDMA_DCM_DMA_WRAP_64	((ushort)0x0000) /* 64-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_128	((ushort)0x0040) /* 128-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_256	((ushort)0x0080) /* 256-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_512	((ushort)0x00c0) /* 512-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_1024	((ushort)0x0100) /* 1024-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_2048	((ushort)0x0140) /* 2048-byte DMA xfer buffer */
+#define IDMA_DCM_SINC		((ushort)0x0020) /* source inc addr */
+#define IDMA_DCM_DINC		((ushort)0x0010) /* destination inc addr */
+#define IDMA_DCM_ERM		((ushort)0x0008) /* external request mode */
+#define IDMA_DCM_DT		((ushort)0x0004) /* DONE treatment */
+#define IDMA_DCM_SD_MASK	((ushort)0x0003) /* mask for SD bit field */
+#define IDMA_DCM_SD_MEM2MEM	((ushort)0x0000) /* memory-to-memory xfer */
+#define IDMA_DCM_SD_PER2MEM	((ushort)0x0002) /* peripheral-to-memory xfer */
+#define IDMA_DCM_SD_MEM2PER	((ushort)0x0001) /* memory-to-peripheral xfer */
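+
+/* For example, a plain memory-to-memory transfer with incrementing
+ * source and destination addresses would set IDMA_DCM_SD_MEM2MEM |
+ * IDMA_DCM_SINC | IDMA_DCM_DINC | IDMA_DCM_DMA_WRAP_64 in the dcm
+ * field (an illustrative combination, not a recommended setup).
+ */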
+
+/* IDMA Buffer Descriptors
+*/
+typedef struct idma_bd {
+	uint flags;
+	uint len;	/* data length */
+	uint src;	/* source data buffer pointer */
+	uint dst;	/* destination data buffer pointer */
+} idma_bd_t;
+
+/* IDMA buffer descriptor flag bit fields
+*/
+#define IDMA_BD_V	((uint)0x80000000)	/* valid */
+#define IDMA_BD_W	((uint)0x20000000)	/* wrap */
+#define IDMA_BD_I	((uint)0x10000000)	/* interrupt */
+#define IDMA_BD_L	((uint)0x08000000)	/* last */
+#define IDMA_BD_CM	((uint)0x02000000)	/* continuous mode */
+#define IDMA_BD_SDN	((uint)0x00400000)	/* source done */
+#define IDMA_BD_DDN	((uint)0x00200000)	/* destination done */
+#define IDMA_BD_DGBL	((uint)0x00100000)	/* destination global */
+#define IDMA_BD_DBO_LE	((uint)0x00040000)	/* little-end dest byte order */
+#define IDMA_BD_DBO_BE	((uint)0x00080000)	/* big-end dest byte order */
+#define IDMA_BD_DDTB	((uint)0x00010000)	/* destination data bus */
+#define IDMA_BD_SGBL	((uint)0x00002000)	/* source global */
+#define IDMA_BD_SBO_LE	((uint)0x00000800)	/* little-end src byte order */
+#define IDMA_BD_SBO_BE	((uint)0x00001000)	/* big-end src byte order */
+#define IDMA_BD_SDTB	((uint)0x00000200)	/* source data bus */
+
+/* per-channel IDMA registers
+*/
+typedef struct im_idma {
+	u_char idsr;			/* IDMAn event status register */
+	u_char res0[3];
+	u_char idmr;			/* IDMAn event mask register */
+	u_char res1[3];
+} im_idma_t;
+
+/* IDMA event register bit fields
+*/
+#define IDMA_EVENT_SC	((unsigned char)0x08)	/* stop completed */
+#define IDMA_EVENT_OB	((unsigned char)0x04)	/* out of buffers */
+#define IDMA_EVENT_EDN	((unsigned char)0x02)	/* external DONE asserted */
+#define IDMA_EVENT_BC	((unsigned char)0x01)	/* buffer descriptor complete */
+
+/* RISC Controller Configuration Register (RCCR) bit fields
+*/
+#define RCCR_TIME	((uint)0x80000000) /* timer enable */
+#define RCCR_TIMEP_MASK	((uint)0x3f000000) /* mask for timer period bit field */
+#define RCCR_DR0M	((uint)0x00800000) /* IDMA0 request mode */
+#define RCCR_DR1M	((uint)0x00400000) /* IDMA1 request mode */
+#define RCCR_DR2M	((uint)0x00000080) /* IDMA2 request mode */
+#define RCCR_DR3M	((uint)0x00000040) /* IDMA3 request mode */
+#define RCCR_DR0QP_MASK	((uint)0x00300000) /* mask for IDMA0 req priority */
+#define RCCR_DR0QP_HIGH ((uint)0x00000000) /* IDMA0 has high req priority */
+#define RCCR_DR0QP_MED	((uint)0x00100000) /* IDMA0 has medium req priority */
+#define RCCR_DR0QP_LOW	((uint)0x00200000) /* IDMA0 has low req priority */
+#define RCCR_DR1QP_MASK	((uint)0x00030000) /* mask for IDMA1 req priority */
+#define RCCR_DR1QP_HIGH ((uint)0x00000000) /* IDMA1 has high req priority */
+#define RCCR_DR1QP_MED	((uint)0x00010000) /* IDMA1 has medium req priority */
+#define RCCR_DR1QP_LOW	((uint)0x00020000) /* IDMA1 has low req priority */
+#define RCCR_DR2QP_MASK	((uint)0x00000030) /* mask for IDMA2 req priority */
+#define RCCR_DR2QP_HIGH ((uint)0x00000000) /* IDMA2 has high req priority */
+#define RCCR_DR2QP_MED	((uint)0x00000010) /* IDMA2 has medium req priority */
+#define RCCR_DR2QP_LOW	((uint)0x00000020) /* IDMA2 has low req priority */
+#define RCCR_DR3QP_MASK	((uint)0x00000003) /* mask for IDMA3 req priority */
+#define RCCR_DR3QP_HIGH ((uint)0x00000000) /* IDMA3 has high req priority */
+#define RCCR_DR3QP_MED	((uint)0x00000001) /* IDMA3 has medium req priority */
+#define RCCR_DR3QP_LOW	((uint)0x00000002) /* IDMA3 has low req priority */
+#define RCCR_EIE	((uint)0x00080000) /* external interrupt enable */
+#define RCCR_SCD	((uint)0x00040000) /* scheduler configuration */
+#define RCCR_ERAM_MASK	((uint)0x0000e000) /* mask for enable RAM microcode */
+#define RCCR_ERAM_0KB	((uint)0x00000000) /* use 0KB of dpram for microcode */
+#define RCCR_ERAM_2KB	((uint)0x00002000) /* use 2KB of dpram for microcode */
+#define RCCR_ERAM_4KB	((uint)0x00004000) /* use 4KB of dpram for microcode */
+#define RCCR_ERAM_6KB	((uint)0x00006000) /* use 6KB of dpram for microcode */
+#define RCCR_ERAM_8KB	((uint)0x00008000) /* use 8KB of dpram for microcode */
+#define RCCR_ERAM_10KB	((uint)0x0000a000) /* use 10KB of dpram for microcode */
+#define RCCR_ERAM_12KB	((uint)0x0000c000) /* use 12KB of dpram for microcode */
+#define RCCR_EDM0	((uint)0x00000800) /* DREQ0 edge detect mode */
+#define RCCR_EDM1	((uint)0x00000400) /* DREQ1 edge detect mode */
+#define RCCR_EDM2	((uint)0x00000200) /* DREQ2 edge detect mode */
+#define RCCR_EDM3	((uint)0x00000100) /* DREQ3 edge detect mode */
+#define RCCR_DEM01	((uint)0x00000008) /* DONE0/DONE1 edge detect mode */
+#define RCCR_DEM23	((uint)0x00000004) /* DONE2/DONE3 edge detect mode */
+
+/*-----------------------------------------------------------------------
+ * CMXFCR - CMX FCC Clock Route Register
+ */
+#define CMXFCR_FC1         0x40000000   /* FCC1 connection              */
+#define CMXFCR_RF1CS_MSK   0x38000000   /* Receive FCC1 Clock Source Mask */
+#define CMXFCR_TF1CS_MSK   0x07000000   /* Transmit FCC1 Clock Source Mask */
+#define CMXFCR_FC2         0x00400000   /* FCC2 connection              */
+#define CMXFCR_RF2CS_MSK   0x00380000   /* Receive FCC2 Clock Source Mask */
+#define CMXFCR_TF2CS_MSK   0x00070000   /* Transmit FCC2 Clock Source Mask */
+#define CMXFCR_FC3         0x00004000   /* FCC3 connection              */
+#define CMXFCR_RF3CS_MSK   0x00003800   /* Receive FCC3 Clock Source Mask */
+#define CMXFCR_TF3CS_MSK   0x00000700   /* Transmit FCC3 Clock Source Mask */
+
+#define CMXFCR_RF1CS_BRG5  0x00000000   /* Receive FCC1 Clock Source is BRG5 */
+#define CMXFCR_RF1CS_BRG6  0x08000000   /* Receive FCC1 Clock Source is BRG6 */
+#define CMXFCR_RF1CS_BRG7  0x10000000   /* Receive FCC1 Clock Source is BRG7 */
+#define CMXFCR_RF1CS_BRG8  0x18000000   /* Receive FCC1 Clock Source is BRG8 */
+#define CMXFCR_RF1CS_CLK9  0x20000000   /* Receive FCC1 Clock Source is CLK9 */
+#define CMXFCR_RF1CS_CLK10 0x28000000   /* Receive FCC1 Clock Source is CLK10 */
+#define CMXFCR_RF1CS_CLK11 0x30000000   /* Receive FCC1 Clock Source is CLK11 */
+#define CMXFCR_RF1CS_CLK12 0x38000000   /* Receive FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_TF1CS_BRG5  0x00000000   /* Transmit FCC1 Clock Source is BRG5 */
+#define CMXFCR_TF1CS_BRG6  0x01000000   /* Transmit FCC1 Clock Source is BRG6 */
+#define CMXFCR_TF1CS_BRG7  0x02000000   /* Transmit FCC1 Clock Source is BRG7 */
+#define CMXFCR_TF1CS_BRG8  0x03000000   /* Transmit FCC1 Clock Source is BRG8 */
+#define CMXFCR_TF1CS_CLK9  0x04000000   /* Transmit FCC1 Clock Source is CLK9 */
+#define CMXFCR_TF1CS_CLK10 0x05000000   /* Transmit FCC1 Clock Source is CLK10 */
+#define CMXFCR_TF1CS_CLK11 0x06000000   /* Transmit FCC1 Clock Source is CLK11 */
+#define CMXFCR_TF1CS_CLK12 0x07000000   /* Transmit FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_RF2CS_BRG5  0x00000000   /* Receive FCC2 Clock Source is BRG5 */
+#define CMXFCR_RF2CS_BRG6  0x00080000   /* Receive FCC2 Clock Source is BRG6 */
+#define CMXFCR_RF2CS_BRG7  0x00100000   /* Receive FCC2 Clock Source is BRG7 */
+#define CMXFCR_RF2CS_BRG8  0x00180000   /* Receive FCC2 Clock Source is BRG8 */
+#define CMXFCR_RF2CS_CLK13 0x00200000   /* Receive FCC2 Clock Source is CLK13 */
+#define CMXFCR_RF2CS_CLK14 0x00280000   /* Receive FCC2 Clock Source is CLK14 */
+#define CMXFCR_RF2CS_CLK15 0x00300000   /* Receive FCC2 Clock Source is CLK15 */
+#define CMXFCR_RF2CS_CLK16 0x00380000   /* Receive FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_TF2CS_BRG5  0x00000000   /* Transmit FCC2 Clock Source is BRG5 */
+#define CMXFCR_TF2CS_BRG6  0x00010000   /* Transmit FCC2 Clock Source is BRG6 */
+#define CMXFCR_TF2CS_BRG7  0x00020000   /* Transmit FCC2 Clock Source is BRG7 */
+#define CMXFCR_TF2CS_BRG8  0x00030000   /* Transmit FCC2 Clock Source is BRG8 */
+#define CMXFCR_TF2CS_CLK13 0x00040000   /* Transmit FCC2 Clock Source is CLK13 */
+#define CMXFCR_TF2CS_CLK14 0x00050000   /* Transmit FCC2 Clock Source is CLK14 */
+#define CMXFCR_TF2CS_CLK15 0x00060000   /* Transmit FCC2 Clock Source is CLK15 */
+#define CMXFCR_TF2CS_CLK16 0x00070000   /* Transmit FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_RF3CS_BRG5  0x00000000   /* Receive FCC3 Clock Source is BRG5 */
+#define CMXFCR_RF3CS_BRG6  0x00000800   /* Receive FCC3 Clock Source is BRG6 */
+#define CMXFCR_RF3CS_BRG7  0x00001000   /* Receive FCC3 Clock Source is BRG7 */
+#define CMXFCR_RF3CS_BRG8  0x00001800   /* Receive FCC3 Clock Source is BRG8 */
+#define CMXFCR_RF3CS_CLK13 0x00002000   /* Receive FCC3 Clock Source is CLK13 */
+#define CMXFCR_RF3CS_CLK14 0x00002800   /* Receive FCC3 Clock Source is CLK14 */
+#define CMXFCR_RF3CS_CLK15 0x00003000   /* Receive FCC3 Clock Source is CLK15 */
+#define CMXFCR_RF3CS_CLK16 0x00003800   /* Receive FCC3 Clock Source is CLK16 */
+
+#define CMXFCR_TF3CS_BRG5  0x00000000   /* Transmit FCC3 Clock Source is BRG5 */
+#define CMXFCR_TF3CS_BRG6  0x00000100   /* Transmit FCC3 Clock Source is BRG6 */
+#define CMXFCR_TF3CS_BRG7  0x00000200   /* Transmit FCC3 Clock Source is BRG7 */
+#define CMXFCR_TF3CS_BRG8  0x00000300   /* Transmit FCC3 Clock Source is BRG8 */
+#define CMXFCR_TF3CS_CLK13 0x00000400   /* Transmit FCC3 Clock Source is CLK13 */
+#define CMXFCR_TF3CS_CLK14 0x00000500   /* Transmit FCC3 Clock Source is CLK14 */
+#define CMXFCR_TF3CS_CLK15 0x00000600   /* Transmit FCC3 Clock Source is CLK15 */
+#define CMXFCR_TF3CS_CLK16 0x00000700   /* Transmit FCC3 Clock Source is CLK16 */
+
+/*-----------------------------------------------------------------------
+ * CMXSCR - CMX SCC Clock Route Register
+ */
+#define CMXSCR_GR1         0x80000000   /* Grant Support of SCC1        */
+#define CMXSCR_SC1         0x40000000   /* SCC1 connection              */
+#define CMXSCR_RS1CS_MSK   0x38000000   /* Receive SCC1 Clock Source Mask */
+#define CMXSCR_TS1CS_MSK   0x07000000   /* Transmit SCC1 Clock Source Mask */
+#define CMXSCR_GR2         0x00800000   /* Grant Support of SCC2        */
+#define CMXSCR_SC2         0x00400000   /* SCC2 connection              */
+#define CMXSCR_RS2CS_MSK   0x00380000   /* Receive SCC2 Clock Source Mask */
+#define CMXSCR_TS2CS_MSK   0x00070000   /* Transmit SCC2 Clock Source Mask */
+#define CMXSCR_GR3         0x00008000   /* Grant Support of SCC3        */
+#define CMXSCR_SC3         0x00004000   /* SCC3 connection              */
+#define CMXSCR_RS3CS_MSK   0x00003800   /* Receive SCC3 Clock Source Mask */
+#define CMXSCR_TS3CS_MSK   0x00000700   /* Transmit SCC3 Clock Source Mask */
+#define CMXSCR_GR4         0x00000080   /* Grant Support of SCC4        */
+#define CMXSCR_SC4         0x00000040   /* SCC4 connection              */
+#define CMXSCR_RS4CS_MSK   0x00000038   /* Receive SCC4 Clock Source Mask */
+#define CMXSCR_TS4CS_MSK   0x00000007   /* Transmit SCC4 Clock Source Mask */
+
+#define CMXSCR_RS1CS_BRG1  0x00000000   /* SCC1 Rx Clock Source is BRG1 */
+#define CMXSCR_RS1CS_BRG2  0x08000000   /* SCC1 Rx Clock Source is BRG2 */
+#define CMXSCR_RS1CS_BRG3  0x10000000   /* SCC1 Rx Clock Source is BRG3 */
+#define CMXSCR_RS1CS_BRG4  0x18000000   /* SCC1 Rx Clock Source is BRG4 */
+#define CMXSCR_RS1CS_CLK11 0x20000000   /* SCC1 Rx Clock Source is CLK11 */
+#define CMXSCR_RS1CS_CLK12 0x28000000   /* SCC1 Rx Clock Source is CLK12 */
+#define CMXSCR_RS1CS_CLK3  0x30000000   /* SCC1 Rx Clock Source is CLK3 */
+#define CMXSCR_RS1CS_CLK4  0x38000000   /* SCC1 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS1CS_BRG1  0x00000000   /* SCC1 Tx Clock Source is BRG1 */
+#define CMXSCR_TS1CS_BRG2  0x01000000   /* SCC1 Tx Clock Source is BRG2 */
+#define CMXSCR_TS1CS_BRG3  0x02000000   /* SCC1 Tx Clock Source is BRG3 */
+#define CMXSCR_TS1CS_BRG4  0x03000000   /* SCC1 Tx Clock Source is BRG4 */
+#define CMXSCR_TS1CS_CLK11 0x04000000   /* SCC1 Tx Clock Source is CLK11 */
+#define CMXSCR_TS1CS_CLK12 0x05000000   /* SCC1 Tx Clock Source is CLK12 */
+#define CMXSCR_TS1CS_CLK3  0x06000000   /* SCC1 Tx Clock Source is CLK3 */
+#define CMXSCR_TS1CS_CLK4  0x07000000   /* SCC1 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS2CS_BRG1  0x00000000   /* SCC2 Rx Clock Source is BRG1 */
+#define CMXSCR_RS2CS_BRG2  0x00080000   /* SCC2 Rx Clock Source is BRG2 */
+#define CMXSCR_RS2CS_BRG3  0x00100000   /* SCC2 Rx Clock Source is BRG3 */
+#define CMXSCR_RS2CS_BRG4  0x00180000   /* SCC2 Rx Clock Source is BRG4 */
+#define CMXSCR_RS2CS_CLK11 0x00200000   /* SCC2 Rx Clock Source is CLK11 */
+#define CMXSCR_RS2CS_CLK12 0x00280000   /* SCC2 Rx Clock Source is CLK12 */
+#define CMXSCR_RS2CS_CLK3  0x00300000   /* SCC2 Rx Clock Source is CLK3 */
+#define CMXSCR_RS2CS_CLK4  0x00380000   /* SCC2 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS2CS_BRG1  0x00000000   /* SCC2 Tx Clock Source is BRG1 */
+#define CMXSCR_TS2CS_BRG2  0x00010000   /* SCC2 Tx Clock Source is BRG2 */
+#define CMXSCR_TS2CS_BRG3  0x00020000   /* SCC2 Tx Clock Source is BRG3 */
+#define CMXSCR_TS2CS_BRG4  0x00030000   /* SCC2 Tx Clock Source is BRG4 */
+#define CMXSCR_TS2CS_CLK11 0x00040000   /* SCC2 Tx Clock Source is CLK11 */
+#define CMXSCR_TS2CS_CLK12 0x00050000   /* SCC2 Tx Clock Source is CLK12 */
+#define CMXSCR_TS2CS_CLK3  0x00060000   /* SCC2 Tx Clock Source is CLK3 */
+#define CMXSCR_TS2CS_CLK4  0x00070000   /* SCC2 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS3CS_BRG1  0x00000000   /* SCC3 Rx Clock Source is BRG1 */
+#define CMXSCR_RS3CS_BRG2  0x00000800   /* SCC3 Rx Clock Source is BRG2 */
+#define CMXSCR_RS3CS_BRG3  0x00001000   /* SCC3 Rx Clock Source is BRG3 */
+#define CMXSCR_RS3CS_BRG4  0x00001800   /* SCC3 Rx Clock Source is BRG4 */
+#define CMXSCR_RS3CS_CLK5  0x00002000   /* SCC3 Rx Clock Source is CLK5 */
+#define CMXSCR_RS3CS_CLK6  0x00002800   /* SCC3 Rx Clock Source is CLK6 */
+#define CMXSCR_RS3CS_CLK7  0x00003000   /* SCC3 Rx Clock Source is CLK7 */
+#define CMXSCR_RS3CS_CLK8  0x00003800   /* SCC3 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS3CS_BRG1  0x00000000   /* SCC3 Tx Clock Source is BRG1 */
+#define CMXSCR_TS3CS_BRG2  0x00000100   /* SCC3 Tx Clock Source is BRG2 */
+#define CMXSCR_TS3CS_BRG3  0x00000200   /* SCC3 Tx Clock Source is BRG3 */
+#define CMXSCR_TS3CS_BRG4  0x00000300   /* SCC3 Tx Clock Source is BRG4 */
+#define CMXSCR_TS3CS_CLK5  0x00000400   /* SCC3 Tx Clock Source is CLK5 */
+#define CMXSCR_TS3CS_CLK6  0x00000500   /* SCC3 Tx Clock Source is CLK6 */
+#define CMXSCR_TS3CS_CLK7  0x00000600   /* SCC3 Tx Clock Source is CLK7 */
+#define CMXSCR_TS3CS_CLK8  0x00000700   /* SCC3 Tx Clock Source is CLK8 */
+
+#define CMXSCR_RS4CS_BRG1  0x00000000   /* SCC4 Rx Clock Source is BRG1 */
+#define CMXSCR_RS4CS_BRG2  0x00000008   /* SCC4 Rx Clock Source is BRG2 */
+#define CMXSCR_RS4CS_BRG3  0x00000010   /* SCC4 Rx Clock Source is BRG3 */
+#define CMXSCR_RS4CS_BRG4  0x00000018   /* SCC4 Rx Clock Source is BRG4 */
+#define CMXSCR_RS4CS_CLK5  0x00000020   /* SCC4 Rx Clock Source is CLK5 */
+#define CMXSCR_RS4CS_CLK6  0x00000028   /* SCC4 Rx Clock Source is CLK6 */
+#define CMXSCR_RS4CS_CLK7  0x00000030   /* SCC4 Rx Clock Source is CLK7 */
+#define CMXSCR_RS4CS_CLK8  0x00000038   /* SCC4 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS4CS_BRG1  0x00000000   /* SCC4 Tx Clock Source is BRG1 */
+#define CMXSCR_TS4CS_BRG2  0x00000001   /* SCC4 Tx Clock Source is BRG2 */
+#define CMXSCR_TS4CS_BRG3  0x00000002   /* SCC4 Tx Clock Source is BRG3 */
+#define CMXSCR_TS4CS_BRG4  0x00000003   /* SCC4 Tx Clock Source is BRG4 */
+#define CMXSCR_TS4CS_CLK5  0x00000004   /* SCC4 Tx Clock Source is CLK5 */
+#define CMXSCR_TS4CS_CLK6  0x00000005   /* SCC4 Tx Clock Source is CLK6 */
+#define CMXSCR_TS4CS_CLK7  0x00000006   /* SCC4 Tx Clock Source is CLK7 */
+#define CMXSCR_TS4CS_CLK8  0x00000007   /* SCC4 Tx Clock Source is CLK8 */
+
+/*-----------------------------------------------------------------------
+ * SIUMCR - SIU Module Configuration Register				 4-31
+ */
+#define SIUMCR_BBD	0x80000000	/* Bus Busy Disable		*/
+#define SIUMCR_ESE	0x40000000	/* External Snoop Enable	*/
+#define SIUMCR_PBSE	0x20000000	/* Parity Byte Select Enable	*/
+#define SIUMCR_CDIS	0x10000000	/* Core Disable			*/
+#define SIUMCR_DPPC00	0x00000000	/* Data Parity Pins Configuration*/
+#define SIUMCR_DPPC01	0x04000000	/* - " -			*/
+#define SIUMCR_DPPC10	0x08000000	/* - " -			*/
+#define SIUMCR_DPPC11	0x0c000000	/* - " -			*/
+#define SIUMCR_L2CPC00	0x00000000	/* L2 Cache Pins Configuration	*/
+#define SIUMCR_L2CPC01	0x01000000	/* - " -			*/
+#define SIUMCR_L2CPC10	0x02000000	/* - " -			*/
+#define SIUMCR_L2CPC11	0x03000000	/* - " -			*/
+#define SIUMCR_LBPC00	0x00000000	/* Local Bus Pins Configuration	*/
+#define SIUMCR_LBPC01	0x00400000	/* - " -			*/
+#define SIUMCR_LBPC10	0x00800000	/* - " -			*/
+#define SIUMCR_LBPC11	0x00c00000	/* - " -			*/
+#define SIUMCR_APPC00	0x00000000	/* Address Parity Pins Configuration*/
+#define SIUMCR_APPC01	0x00100000	/* - " -			*/
+#define SIUMCR_APPC10	0x00200000	/* - " -			*/
+#define SIUMCR_APPC11	0x00300000	/* - " -			*/
+#define SIUMCR_CS10PC00	0x00000000	/* CS10 Pin Configuration	*/
+#define SIUMCR_CS10PC01	0x00040000	/* - " -			*/
+#define SIUMCR_CS10PC10	0x00080000	/* - " -			*/
+#define SIUMCR_CS10PC11	0x000c0000	/* - " -			*/
+#define SIUMCR_BCTLC00	0x00000000	/* Buffer Control Configuration	*/
+#define SIUMCR_BCTLC01	0x00010000	/* - " -			*/
+#define SIUMCR_BCTLC10	0x00020000	/* - " -			*/
+#define SIUMCR_BCTLC11	0x00030000	/* - " -			*/
+#define SIUMCR_MMR00	0x00000000	/* Mask Masters Requests	*/
+#define SIUMCR_MMR01	0x00004000	/* - " -			*/
+#define SIUMCR_MMR10	0x00008000	/* - " -			*/
+#define SIUMCR_MMR11	0x0000c000	/* - " -			*/
+#define SIUMCR_LPBSE	0x00002000	/* LocalBus Parity Byte Select Enable*/
+
+/*-----------------------------------------------------------------------
+ * SCCR - System Clock Control Register					 9-8
+*/
+#define SCCR_PCI_MODE	0x00000100	/* PCI Mode	*/
+#define SCCR_PCI_MODCK	0x00000080	/* Value of PCI_MODCK pin	*/
+#define SCCR_PCIDF_MSK	0x00000078	/* PCI division factor	*/
+#define SCCR_PCIDF_SHIFT 3
+
+#ifndef CPM_IMMR_OFFSET
+#define CPM_IMMR_OFFSET	0x101a8
+#endif
+
+#define FCC_PSMR_RMII	((uint)0x00020000)	/* Use RMII interface */
+
+/* FCC I/O port & clock configuration. BSP code is responsible for defining
+ * Fx_RXCLK & Fx_TXCLK in order to use the clock-computing macros below for
+ * FCC x; see the illustrative example after these macros.
+ */
+
+/* Automatically generates register configurations */
+#define PC_CLK(x)	((uint)(1 << ((x) - 1)))	/* FCC CLK I/O ports */
+
+#define CMXFCR_RF1CS(x)	((uint)(((x) - 5) << 27))	/* FCC1 Receive Clock Source */
+#define CMXFCR_TF1CS(x)	((uint)(((x) - 5) << 24))	/* FCC1 Transmit Clock Source */
+#define CMXFCR_RF2CS(x)	((uint)(((x) - 9) << 19))	/* FCC2 Receive Clock Source */
+#define CMXFCR_TF2CS(x)	((uint)(((x) - 9) << 16))	/* FCC2 Transmit Clock Source */
+#define CMXFCR_RF3CS(x)	((uint)(((x) - 9) << 11))	/* FCC3 Receive Clock Source */
+#define CMXFCR_TF3CS(x)	((uint)(((x) - 9) << 8))	/* FCC3 Transmit Clock Source */
+
+#define PC_F1RXCLK	PC_CLK(F1_RXCLK)
+#define PC_F1TXCLK	PC_CLK(F1_TXCLK)
+#define CMX1_CLK_ROUTE	(CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
+#define CMX1_CLK_MASK	((uint)0xff000000)
+
+#define PC_F2RXCLK	PC_CLK(F2_RXCLK)
+#define PC_F2TXCLK	PC_CLK(F2_TXCLK)
+#define CMX2_CLK_ROUTE	(CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
+#define CMX2_CLK_MASK	((uint)0x00ff0000)
+
+#define PC_F3RXCLK	PC_CLK(F3_RXCLK)
+#define PC_F3TXCLK	PC_CLK(F3_TXCLK)
+#define CMX3_CLK_ROUTE	(CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
+#define CMX3_CLK_MASK	((uint)0x0000ff00)
+
+#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK)
+#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE)
+
+#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK)
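+
+/* Illustrative example (not part of the original header): a board port that
+ * clocks FCC1 from CLK11 (receive) and CLK10 (transmit) might define, before
+ * the macros above are expanded:
+ *
+ *	#define F1_RXCLK	11
+ *	#define F1_TXCLK	10
+ *
+ * PC_F1RXCLK then evaluates to (1 << 10), the CLK11 bit in the port C
+ * registers, and CMX1_CLK_ROUTE to 0x35000000, i.e. CMXFCR_RF1CS_CLK11 |
+ * CMXFCR_TF1CS_CLK10.  The clock numbers here are hypothetical; valid
+ * choices depend on the board's pin mux.
+ */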
+
+/* I/O Pin assignment for FCC1.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PA1_COL		0x00000001U
+#define PA1_CRS		0x00000002U
+#define PA1_TXER	0x00000004U
+#define PA1_TXEN	0x00000008U
+#define PA1_RXDV	0x00000010U
+#define PA1_RXER	0x00000020U
+#define PA1_TXDAT	0x00003c00U
+#define PA1_RXDAT	0x0003c000U
+#define PA1_PSORA0	(PA1_RXDAT | PA1_TXDAT)
+#define PA1_PSORA1	(PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
+		PA1_RXDV | PA1_RXER)
+#define PA1_DIRA0	(PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
+#define PA1_DIRA1	(PA1_TXDAT | PA1_TXEN | PA1_TXER)
+
+
+/* I/O Pin assignment for FCC2.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB2_TXER	0x00000001U
+#define PB2_RXDV	0x00000002U
+#define PB2_TXEN	0x00000004U
+#define PB2_RXER	0x00000008U
+#define PB2_COL		0x00000010U
+#define PB2_CRS		0x00000020U
+#define PB2_TXDAT	0x000003c0U
+#define PB2_RXDAT	0x00003c00U
+#define PB2_PSORB0	(PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
+		PB2_RXER | PB2_RXDV | PB2_TXER)
+#define PB2_PSORB1	(PB2_TXEN)
+#define PB2_DIRB0	(PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
+#define PB2_DIRB1	(PB2_TXDAT | PB2_TXEN | PB2_TXER)
+
+
+/* I/O Pin assignment for FCC3.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB3_RXDV	0x00004000U
+#define PB3_RXER	0x00008000U
+#define PB3_TXER	0x00010000U
+#define PB3_TXEN	0x00020000U
+#define PB3_COL		0x00040000U
+#define PB3_CRS		0x00080000U
+#define PB3_TXDAT	0x0f000000U
+#define PC3_TXDAT	0x00000010U
+#define PB3_RXDAT	0x00f00000U
+#define PB3_PSORB0	(PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
+		PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
+#define PB3_PSORB1	0
+#define PB3_DIRB0	(PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
+#define PB3_DIRB1	(PB3_TXDAT | PB3_TXEN | PB3_TXER)
+#define PC3_DIRC1	(PC3_TXDAT)
+
+/* Handy macro to specify memory offsets for the FCCs */
+#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + ((x) * 128))
+#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0)
+#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
+#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2)
+
+/* Clocks and BRGs */
+
+enum cpm_clk_dir {
+	CPM_CLK_RX,
+	CPM_CLK_TX,
+	CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+	CPM_CLK_SCC1,
+	CPM_CLK_SCC2,
+	CPM_CLK_SCC3,
+	CPM_CLK_SCC4,
+	CPM_CLK_FCC1,
+	CPM_CLK_FCC2,
+	CPM_CLK_FCC3,
+	CPM_CLK_SMC1,
+	CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+	CPM_CLK_NONE = 0,
+	CPM_BRG1,	/* Baud Rate Generator  1 */
+	CPM_BRG2,	/* Baud Rate Generator  2 */
+	CPM_BRG3,	/* Baud Rate Generator  3 */
+	CPM_BRG4,	/* Baud Rate Generator  4 */
+	CPM_BRG5,	/* Baud Rate Generator  5 */
+	CPM_BRG6,	/* Baud Rate Generator  6 */
+	CPM_BRG7,	/* Baud Rate Generator  7 */
+	CPM_BRG8,	/* Baud Rate Generator  8 */
+	CPM_CLK1,	/* Clock  1 */
+	CPM_CLK2,	/* Clock  2 */
+	CPM_CLK3,	/* Clock  3 */
+	CPM_CLK4,	/* Clock  4 */
+	CPM_CLK5,	/* Clock  5 */
+	CPM_CLK6,	/* Clock  6 */
+	CPM_CLK7,	/* Clock  7 */
+	CPM_CLK8,	/* Clock  8 */
+	CPM_CLK9,	/* Clock  9 */
+	CPM_CLK10,	/* Clock 10 */
+	CPM_CLK11,	/* Clock 11 */
+	CPM_CLK12,	/* Clock 12 */
+	CPM_CLK13,	/* Clock 13 */
+	CPM_CLK14,	/* Clock 14 */
+	CPM_CLK15,	/* Clock 15 */
+	CPM_CLK16,	/* Clock 16 */
+	CPM_CLK17,	/* Clock 17 */
+	CPM_CLK18,	/* Clock 18 */
+	CPM_CLK19,	/* Clock 19 */
+	CPM_CLK20,	/* Clock 20 */
+	CPM_CLK_DUMMY
+};
+
+extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
+extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
+
+#define CPM_PIN_INPUT     0
+#define CPM_PIN_OUTPUT    1
+#define CPM_PIN_PRIMARY   0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO      4
+#define CPM_PIN_OPENDRAIN 8
+
+void cpm2_set_pin(int port, int pin, int flags);
+
+#endif /* __CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index 0000000..7897d16
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+#include <asm/cputable.h>
+
+static inline bool early_cpu_has_feature(unsigned long feature)
+{
+	return !!((CPU_FTRS_ALWAYS & feature) ||
+		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_CPU_FTR_KEYS	BITS_PER_LONG
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+	int i;
+
+#ifndef __clang__ /* clang can't cope with this */
+	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! cpu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return early_cpu_has_feature(feature);
+	}
+#endif
+
+	if (CPU_FTRS_ALWAYS & feature)
+		return true;
+
+	if (!(CPU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&cpu_feature_keys[i]);
+}
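+
+/* A worked example (commentary, not from the original source): for a
+ * compile-time-constant argument such as cpu_has_feature(CPU_FTR_ALTIVEC),
+ * the two mask tests above fold away at compile time.  If the bit is in
+ * CPU_FTRS_ALWAYS the call becomes a constant 'true'; if it is not in
+ * CPU_FTRS_POSSIBLE it becomes a constant 'false'; otherwise it compiles
+ * down to a single static branch on cpu_feature_keys[__builtin_ctzl(feature)],
+ * which the boot code patches once the CPU's features are known.
+ */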
+#else
+static inline bool cpu_has_feature(unsigned long feature)
+{
+	return early_cpu_has_feature(feature);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
new file mode 100644
index 0000000..19e6290
--- /dev/null
+++ b/arch/powerpc/include/asm/cpufeature.h
@@ -0,0 +1,40 @@
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see asm/cputable.h for powerpc CPU features.
+ *
+ * Copyright 2016 Alastair D'Silva, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_CPUFEATURE_H
+#define __ASM_POWERPC_CPUFEATURE_H
+
+#include <asm/cputable.h>
+
+/* Keep these in step with powerpc/include/asm/cputable.h */
+#define MAX_CPU_FEATURES (2 * 32)
+
+/*
+ * Currently we don't have a need for any of the feature bits defined in
+ * cpu_user_features. When we do, they should be defined along these lines:
+ *
+ * #define PPC_MODULE_FEATURE_32 (ilog2(PPC_FEATURE_32))
+ */
+
+#define PPC_MODULE_FEATURE_VEC_CRYPTO			(32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
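+
+/* Sketch of the intended use (illustrative; the init function name is made
+ * up): a module that needs the vector crypto facility can auto-load on
+ * capable CPUs with:
+ *
+ *	module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, my_mod_init);
+ */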
+
+#define cpu_feature(x)		(x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+	if (num < 32)
+		return !!(cur_cpu_spec->cpu_user_features & 1UL << num);
+	else
+		return !!(cur_cpu_spec->cpu_user_features2 & 1UL << (num - 32));
+}
+
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
new file mode 100644
index 0000000..43e5f31
--- /dev/null
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CPUIDLE_H
+#define _ASM_POWERPC_CPUIDLE_H
+
+#ifdef CONFIG_PPC_POWERNV
+/* Thread state used in powernv idle state management */
+#define PNV_THREAD_RUNNING              0
+#define PNV_THREAD_NAP                  1
+#define PNV_THREAD_SLEEP                2
+#define PNV_THREAD_WINKLE               3
+
+/*
+ * Core state used in powernv idle for POWER8.
+ *
+ * The lock bit synchronizes updates to the state, as well as parts of the
+ * sleep/wake code (see kernel/idle_book3s.S).
+ *
+ * The bottom 8 bits track the idle state of each thread. A thread's bit is
+ * cleared before it executes an idle instruction (nap/sleep/winkle).
+ *
+ * Then there is winkle tracking. A core does not lose complete state
+ * until every thread is in winkle. So the winkle count field counts the
+ * number of threads in winkle (small window of false positives is okay
+ * around the sleep/wake, so long as there are no false negatives).
+ *
+ * When the winkle count reaches 8 (the COUNT_ALL_BIT becomes set), then
+ * the THREAD_WINKLE_BITS are set, which indicate which threads have not
+ * yet woken from the winkle state.
+ */
+#define PNV_CORE_IDLE_LOCK_BIT			0x10000000
+
+#define PNV_CORE_IDLE_WINKLE_COUNT		0x00010000
+#define PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT	0x00080000
+#define PNV_CORE_IDLE_WINKLE_COUNT_BITS		0x000F0000
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT	8
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS	0x0000FF00
+
+#define PNV_CORE_IDLE_THREAD_BITS       	0x000000FF
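+
+/* Note: the winkle count field is incremented in units of
+ * PNV_CORE_IDLE_WINKLE_COUNT (0x00010000), so once all 8 threads of a core
+ * are in winkle it holds 8 * 0x00010000 == 0x00080000, which is exactly
+ * PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT.
+ */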
+
+/*
+ * ============================ NOTE =================================
+ * Older firmware populates only the RL field in psscr_val and sets
+ * psscr_mask to 0xf. On such firmware, the kernel sets the remaining
+ * PSSCR fields to default values as follows:
+ *
+ * - ESL and EC bits are set to 1, so wakeup from any stop state will
+ *   be at vector 0x100.
+ *
+ * - MTL and PSLL are set to the maximum allowed value as per the ISA,
+ *   i.e. 15.
+ *
+ * - The Transition Rate (TR) is set to the maximum value, 3.
+ */
+#define PSSCR_HV_DEFAULT_VAL    (PSSCR_ESL | PSSCR_EC |		    \
+				PSSCR_PSLL_MASK | PSSCR_TR_MASK |   \
+				PSSCR_MTL_MASK)
+
+#define PSSCR_HV_DEFAULT_MASK   (PSSCR_ESL | PSSCR_EC |		    \
+				PSSCR_PSLL_MASK | PSSCR_TR_MASK |   \
+				PSSCR_MTL_MASK | PSSCR_RL_MASK)
+#define PSSCR_EC_SHIFT    20
+#define PSSCR_ESL_SHIFT   21
+#define GET_PSSCR_EC(x)   (((x) & PSSCR_EC) >> PSSCR_EC_SHIFT)
+#define GET_PSSCR_ESL(x)  (((x) & PSSCR_ESL) >> PSSCR_ESL_SHIFT)
+#define GET_PSSCR_RL(x)   ((x) & PSSCR_RL_MASK)
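+
+/* For example (assuming PSSCR_EC is the single bit at PSSCR_EC_SHIFT):
+ * GET_PSSCR_EC(PSSCR_HV_DEFAULT_VAL) evaluates to 1, since the default
+ * value above sets both ESL and EC.
+ */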
+
+#define ERR_EC_ESL_MISMATCH		-1
+#define ERR_DEEP_STATE_ESL_MISMATCH	-2
+
+#ifndef __ASSEMBLY__
+/* Additional SPRs that need to be saved/restored during stop */
+struct stop_sprs {
+	u64 pid;
+	u64 ldbar;
+	u64 fscr;
+	u64 hfscr;
+	u64 mmcr1;
+	u64 mmcr2;
+	u64 mmcra;
+};
+
+#define PNV_IDLE_NAME_LEN    16
+struct pnv_idle_states_t {
+	char name[PNV_IDLE_NAME_LEN];
+	u32 latency_ns;
+	u32 residency_ns;
+	u64 psscr_val;
+	u64 psscr_mask;
+	u32 flags;
+	bool valid;
+};
+
+extern struct pnv_idle_states_t *pnv_idle_states;
+extern int nr_pnv_idle_states;
+extern u32 pnv_fastsleep_workaround_at_entry[];
+extern u32 pnv_fastsleep_workaround_at_exit[];
+
+extern u64 pnv_first_deep_stop_state;
+
+unsigned long pnv_cpu_offline(unsigned int cpu);
+int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
+static inline void report_invalid_psscr_val(u64 psscr_val, int err)
+{
+	switch (err) {
+	case ERR_EC_ESL_MISMATCH:
+		pr_warn("Invalid psscr 0x%016llx : ESL,EC bits unequal",
+			psscr_val);
+		break;
+	case ERR_DEEP_STATE_ESL_MISMATCH:
+		pr_warn("Invalid psscr 0x%016llx : ESL cleared for deep stop-state",
+			psscr_val);
+	}
+}
+#endif
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
new file mode 100644
index 0000000..29f49a3
--- /dev/null
+++ b/arch/powerpc/include/asm/cputable.h
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/cputable.h>
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+
+/* This structure can grow; its real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+
+typedef	void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+typedef	void (*cpu_restore_t)(void);
+
+enum powerpc_oprofile_type {
+	PPC_OPROFILE_INVALID = 0,
+	PPC_OPROFILE_RS64 = 1,
+	PPC_OPROFILE_POWER4 = 2,
+	PPC_OPROFILE_G4 = 3,
+	PPC_OPROFILE_FSL_EMB = 4,
+	PPC_OPROFILE_CELL = 5,
+	PPC_OPROFILE_PA6T = 6,
+};
+
+enum powerpc_pmc_type {
+	PPC_PMC_DEFAULT = 0,
+	PPC_PMC_IBM = 1,
+	PPC_PMC_PA6T = 2,
+	PPC_PMC_G4 = 3,
+};
+
+struct pt_regs;
+
+extern int machine_check_generic(struct pt_regs *regs);
+extern int machine_check_4xx(struct pt_regs *regs);
+extern int machine_check_440A(struct pt_regs *regs);
+extern int machine_check_e500mc(struct pt_regs *regs);
+extern int machine_check_e500(struct pt_regs *regs);
+extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
+
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+
+/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
+struct cpu_spec {
+	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
+	unsigned int	pvr_mask;
+	unsigned int	pvr_value;
+
+	char		*cpu_name;
+	unsigned long	cpu_features;		/* Kernel features */
+	unsigned int	cpu_user_features;	/* Userland features */
+	unsigned int	cpu_user_features2;	/* Userland features v2 */
+	unsigned int	mmu_features;		/* MMU features */
+
+	/* cache line sizes */
+	unsigned int	icache_bsize;
+	unsigned int	dcache_bsize;
+
+	/* flush caches inside the current cpu */
+	void (*cpu_down_flush)(void);
+
+	/* number of performance monitor counters */
+	unsigned int	num_pmcs;
+	enum powerpc_pmc_type pmc_type;
+
+	/* this is called to initialize various CPU bits like L1 cache,
+	 * BHT, SPD, etc... from head.S before branching to identify_machine
+	 */
+	cpu_setup_t	cpu_setup;
+	/* Used to restore cpu setup on secondary processors and at resume */
+	cpu_restore_t	cpu_restore;
+
+	/* Used by oprofile userspace to select the right counters */
+	char		*oprofile_cpu_type;
+
+	/* Processor specific oprofile operations */
+	enum powerpc_oprofile_type oprofile_type;
+
+	/* Bit locations inside the mmcra change */
+	unsigned long	oprofile_mmcra_sihv;
+	unsigned long	oprofile_mmcra_sipr;
+
+	/* Bits to clear during an oprofile exception */
+	unsigned long	oprofile_mmcra_clear;
+
+	/* Name of processor class, for the ELF AT_PLATFORM entry */
+	char		*platform;
+
+	/* Processor specific machine check handling. Return negative
+	 * if the error is fatal, 1 if it was fully recovered, and 0 to
+	 * pass up (not CPU originated). */
+	int		(*machine_check)(struct pt_regs *regs);
+
+	/*
+	 * Processor specific early machine check handler which is
+	 * called in real mode to handle SLB and TLB errors.
+	 */
+	long		(*machine_check_early)(struct pt_regs *regs);
+};
+
+extern struct cpu_spec		*cur_cpu_spec;
+
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern void set_cur_cpu_spec(struct cpu_spec *s);
+extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+			      void *fixup_end);
+
+extern const char *powerpc_base_platform;
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Definitions for features that we have on both 32-bit and 64-bit chips */
+#define CPU_FTR_COHERENT_ICACHE		ASM_CONST(0x00000001)
+#define CPU_FTR_ALTIVEC			ASM_CONST(0x00000002)
+#define CPU_FTR_DBELL			ASM_CONST(0x00000004)
+#define CPU_FTR_CAN_NAP			ASM_CONST(0x00000008)
+#define CPU_FTR_DEBUG_LVL_EXC		ASM_CONST(0x00000010)
+#define CPU_FTR_NODSISRALIGN		ASM_CONST(0x00000020)
+#define CPU_FTR_FPU_UNAVAILABLE		ASM_CONST(0x00000040)
+#define CPU_FTR_LWSYNC			ASM_CONST(0x00000080)
+#define CPU_FTR_NOEXECUTE		ASM_CONST(0x00000100)
+#define CPU_FTR_EMB_HV			ASM_CONST(0x00000200)
+
+/* Definitions for features that only exist on 32-bit chips */
+#ifdef CONFIG_PPC32
+#define CPU_FTR_601			ASM_CONST(0x00001000)
+#define CPU_FTR_L2CR			ASM_CONST(0x00002000)
+#define CPU_FTR_SPEC7450		ASM_CONST(0x00004000)
+#define CPU_FTR_TAU			ASM_CONST(0x00008000)
+#define CPU_FTR_CAN_DOZE		ASM_CONST(0x00010000)
+#define CPU_FTR_USE_RTC			ASM_CONST(0x00020000)
+#define CPU_FTR_L3CR			ASM_CONST(0x00040000)
+#define CPU_FTR_L3_DISABLE_NAP		ASM_CONST(0x00080000)
+#define CPU_FTR_NAP_DISABLE_L2_PR	ASM_CONST(0x00100000)
+#define CPU_FTR_DUAL_PLL_750FX		ASM_CONST(0x00200000)
+#define CPU_FTR_NO_DPM			ASM_CONST(0x00400000)
+#define CPU_FTR_476_DD2			ASM_CONST(0x00800000)
+#define CPU_FTR_NEED_COHERENT		ASM_CONST(0x01000000)
+#define CPU_FTR_NO_BTIC			ASM_CONST(0x02000000)
+#define CPU_FTR_PPC_LE			ASM_CONST(0x04000000)
+#define CPU_FTR_UNIFIED_ID_CACHE	ASM_CONST(0x08000000)
+#define CPU_FTR_SPE			ASM_CONST(0x10000000)
+#define CPU_FTR_NEED_PAIRED_STWCX	ASM_CONST(0x20000000)
+#define CPU_FTR_INDEXED_DCR		ASM_CONST(0x40000000)
+
+#else	/* CONFIG_PPC32 */
+/* Define these to 0 for the sake of tests in common code */
+#define CPU_FTR_601			(0)
+#define CPU_FTR_PPC_LE			(0)
+#endif
+
+/*
+ * Definitions for the 64-bit processor unique features;
+ * on 32-bit, make the names available but defined to be 0.
+ */
+#ifdef __powerpc64__
+#define LONG_ASM_CONST(x)		ASM_CONST(x)
+#else
+#define LONG_ASM_CONST(x)		0
+#endif
+
+#define CPU_FTR_REAL_LE			LONG_ASM_CONST(0x0000000000001000)
+#define CPU_FTR_HVMODE			LONG_ASM_CONST(0x0000000000002000)
+#define CPU_FTR_ARCH_206		LONG_ASM_CONST(0x0000000000008000)
+#define CPU_FTR_ARCH_207S		LONG_ASM_CONST(0x0000000000010000)
+#define CPU_FTR_ARCH_300		LONG_ASM_CONST(0x0000000000020000)
+#define CPU_FTR_MMCRA			LONG_ASM_CONST(0x0000000000040000)
+#define CPU_FTR_CTRL			LONG_ASM_CONST(0x0000000000080000)
+#define CPU_FTR_SMT			LONG_ASM_CONST(0x0000000000100000)
+#define CPU_FTR_PAUSE_ZERO		LONG_ASM_CONST(0x0000000000200000)
+#define CPU_FTR_PURR			LONG_ASM_CONST(0x0000000000400000)
+#define CPU_FTR_CELL_TB_BUG		LONG_ASM_CONST(0x0000000000800000)
+#define CPU_FTR_SPURR			LONG_ASM_CONST(0x0000000001000000)
+#define CPU_FTR_DSCR			LONG_ASM_CONST(0x0000000002000000)
+#define CPU_FTR_VSX			LONG_ASM_CONST(0x0000000004000000)
+#define CPU_FTR_SAO			LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0000000010000000)
+#define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0000000020000000)
+#define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0000000040000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS	LONG_ASM_CONST(0x0000000080000000)
+#define CPU_FTR_POPCNTB			LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_POPCNTD			LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_PKEY			LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_VMX_COPY		LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_TM			LONG_ASM_CONST(0x0000001000000000)
+#define CPU_FTR_CFAR			LONG_ASM_CONST(0x0000002000000000)
+#define	CPU_FTR_HAS_PPR			LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_DAWR			LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_DABRX			LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x0000020000000000)
+#define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x0000080000000000)
+#define CPU_FTR_P9_TM_HV_ASSIST		LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_P9_TM_XER_SO_BUG	LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_P9_TLBIE_BUG		LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TIDR			LONG_ASM_CONST(0x0000800000000000)
+
+#ifndef __ASSEMBLY__
+
+#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+
+#define MMU_FTR_PPCAS_ARCH_V2 	(MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
+
+/* We only set the altivec features if the kernel was compiled with altivec
+ * support
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP	0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
+#endif
+
+/* We only set the VSX features if the kernel was compiled with VSX
+ * support
+ */
+#ifdef CONFIG_VSX
+#define CPU_FTR_VSX_COMP	CPU_FTR_VSX
+#define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX
+#else
+#define CPU_FTR_VSX_COMP	0
+#define PPC_FEATURE_HAS_VSX_COMP    0
+#endif
+
+/* We only set the spe features if the kernel was compiled with spe
+ * support
+ */
+#ifdef CONFIG_SPE
+#define CPU_FTR_SPE_COMP	CPU_FTR_SPE
+#define PPC_FEATURE_HAS_SPE_COMP PPC_FEATURE_HAS_SPE
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP PPC_FEATURE_HAS_EFP_SINGLE
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP PPC_FEATURE_HAS_EFP_DOUBLE
+#else
+#define CPU_FTR_SPE_COMP	0
+#define PPC_FEATURE_HAS_SPE_COMP    0
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP 0
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
+#endif
+
+/* We only set the TM feature if the kernel was compiled with TM support */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define CPU_FTR_TM_COMP			CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP		PPC_FEATURE2_HTM
+#define PPC_FEATURE2_HTM_NOSC_COMP	PPC_FEATURE2_HTM_NOSC
+#else
+#define CPU_FTR_TM_COMP			0
+#define PPC_FEATURE2_HTM_COMP		0
+#define PPC_FEATURE2_HTM_NOSC_COMP	0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we have a
+ * 74[45]x and an MPC107 host bridge. 83xx and PowerQUICC II parts also
+ * require it for PCI "streaming/prefetch" to work properly, as does
+ * the 52xx family.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
+	|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
+	|| defined(CONFIG_PPC_MPC52xx)
+#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON                  0
+#endif
+
+/* The powersave features NAP & DOZE seem to confuse the BDI when
+   debugging, so if a BDI is used, disable these.
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE	CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP	CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE	0
+#define CPU_FTR_MAYBE_CAN_NAP	0
+#endif
+
+#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | \
+	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC)
+#define CPU_FTRS_603	(CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_604	(CPU_FTR_COMMON | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+	    CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_PPC_LE)
+#define CPU_FTRS_750	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+	    CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_PPC_LE)
+#define CPU_FTRS_750CL	(CPU_FTRS_750)
+#define CPU_FTRS_750FX1	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX2	(CPU_FTRS_750 | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
+#define CPU_FTRS_750GX	(CPU_FTRS_750FX)
+#define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+	    CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7400	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
+	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7450_20	(CPU_FTR_COMMON | \
+	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_21	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_23	(CPU_FTR_COMMON | \
+	    CPU_FTR_NEED_PAIRED_STWCX | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_1	(CPU_FTR_COMMON | \
+	    CPU_FTR_NEED_PAIRED_STWCX | \
+	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
+	    CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_20	(CPU_FTR_COMMON | \
+	    CPU_FTR_NEED_PAIRED_STWCX | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447_10	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
+	    CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447A	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7448	(CPU_FTR_COMMON | \
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
+	    CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_82XX	(CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE)
+#define CPU_FTRS_G2_LE	(CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_MAYBE_CAN_NAP)
+#define CPU_FTRS_E300	(CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_COMMON)
+#define CPU_FTRS_E300C2	(CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
+#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON)
+#define CPU_FTRS_8XX	(CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_40X	(CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X	(CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6	(CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X	(CPU_FTRS_440x6)
+#define CPU_FTRS_E200	(CPU_FTR_SPE_COMP | \
+	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
+	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DEBUG_LVL_EXC)
+#define CPU_FTRS_E500	(CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E500_2	(CPU_FTR_MAYBE_CAN_DOZE | \
+	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E500MC	(CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+/*
+ * e5500/e6500 erratum A-006958 is a timebase bug that can use the
+ * same workaround as CPU_FTR_CELL_TB_BUG.
+ */
+#define CPU_FTRS_E5500	(CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
+#define CPU_FTRS_E6500	(CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
+#define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
+
+/* 64-bit CPUs */
+#define CPU_FTRS_PPC970	(CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
+	    CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+	    CPU_FTR_HVMODE | CPU_FTR_DABRX)
+#define CPU_FTRS_POWER5	(CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
+#define CPU_FTRS_POWER6 (CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | \
+	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+	    CPU_FTR_DABRX)
+#define CPU_FTRS_POWER7 (CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | \
+	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+	    CPU_FTR_CFAR | CPU_FTR_HVMODE | \
+	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX | CPU_FTR_PKEY)
+#define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | \
+	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO  | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
+#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | \
+	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO  | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+	    CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
+	    CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
+#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
+#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+			       CPU_FTR_P9_TM_HV_ASSIST | \
+			       CPU_FTR_P9_TM_XER_SO_BUG)
+#define CPU_FTRS_CELL	(CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_PAUSE_ZERO  | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+	    CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
+#define CPU_FTRS_PA6T (CPU_FTR_LWSYNC | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
+#define CPU_FTRS_COMPATIBLE	(CPU_FTR_PPCAS_ARCH_V2)
+
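+/*
+ * CPU_FTRS_POSSIBLE is the bitwise OR (union) of the feature masks of every
+ * CPU the kernel is configured to support; CPU_FTRS_ALWAYS further below is
+ * the bitwise AND (intersection).  cpu_has_feature() uses them to fold
+ * feature tests down to compile-time constants where possible.
+ */
+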
+#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE	(CPU_FTRS_E6500 | CPU_FTRS_E5500)
+#else
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define CPU_FTRS_POSSIBLE	\
+	    (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
+	     CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
+	     CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+#else
+#define CPU_FTRS_POSSIBLE	\
+	    (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
+	     CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
+	     CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+	     CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
+	     CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+#endif
+#else
+enum {
+	CPU_FTRS_POSSIBLE =
+#ifdef CONFIG_PPC_BOOK3S_32
+	    CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+	    CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+	    CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+	    CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+	    CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+	    CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+	    CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
+	    CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
+	    CPU_FTRS_CLASSIC32 |
+#else
+	    CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_PPC_8xx
+	    CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+	    CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_44x
+	    CPU_FTRS_44X | CPU_FTRS_440x6 |
+#endif
+#ifdef CONFIG_PPC_47x
+	    CPU_FTRS_47X | CPU_FTR_476_DD2 |
+#endif
+#ifdef CONFIG_E200
+	    CPU_FTRS_E200 |
+#endif
+#ifdef CONFIG_E500
+	    CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+#endif
+	    0,
+};
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS		(CPU_FTRS_E6500 & CPU_FTRS_E5500)
+#else
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+#define CPU_FTRS_DT_CPU_BASE			\
+	(CPU_FTR_LWSYNC |			\
+	 CPU_FTR_FPU_UNAVAILABLE |		\
+	 CPU_FTR_NODSISRALIGN |			\
+	 CPU_FTR_NOEXECUTE |			\
+	 CPU_FTR_COHERENT_ICACHE |		\
+	 CPU_FTR_STCX_CHECKS_ADDRESS |		\
+	 CPU_FTR_POPCNTB | CPU_FTR_POPCNTD |	\
+	 CPU_FTR_DAWR |				\
+	 CPU_FTR_ARCH_206 |			\
+	 CPU_FTR_ARCH_207S)
+#else
+#define CPU_FTRS_DT_CPU_BASE	(~0ul)
+#endif
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define CPU_FTRS_ALWAYS \
+	    (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
+	     CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
+	     CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+#else
+#define CPU_FTRS_ALWAYS		\
+	    (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
+	     CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
+	     CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
+	     ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
+	     CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+#endif
+#else
+enum {
+	CPU_FTRS_ALWAYS =
+#ifdef CONFIG_PPC_BOOK3S_32
+	    CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+	    CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+	    CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+	    CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+	    CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+	    CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+	    CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
+	    CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
+	    CPU_FTRS_CLASSIC32 &
+#else
+	    CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_PPC_8xx
+	    CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+	    CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_44x
+	    CPU_FTRS_44X & CPU_FTRS_440x6 &
+#endif
+#ifdef CONFIG_E200
+	    CPU_FTRS_E200 &
+#endif
+#ifdef CONFIG_E500
+	    CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+#endif
+	    ~CPU_FTR_EMB_HV &	/* can be removed at runtime */
+	    CPU_FTRS_POSSIBLE,
+};
+#endif /* __powerpc64__ */
+
+#define HBP_NUM 1
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
new file mode 100644
index 0000000..d71a909
--- /dev/null
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CPUTHREADS_H
+#define _ASM_POWERPC_CPUTHREADS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * Mapping of threads to cores
+ *
+ * Note: This implementation is limited to a power of 2 number of
+ * threads per core and the same number for each core in the system
+ * (though it would work if some processors had fewer threads as long
+ * as the CPU numbers are still allocated, just not brought online).
+ *
+ * However, the API allows for a different implementation in the future
+ * if needed, as long as you only use the functions and not the variables
+ * directly.
+ */
+
+#ifdef CONFIG_SMP
+extern int threads_per_core;
+extern int threads_per_subcore;
+extern int threads_shift;
+extern cpumask_t threads_core_mask;
+#else
+#define threads_per_core	1
+#define threads_per_subcore	1
+#define threads_shift		0
+#define threads_core_mask	(*get_cpu_mask(0))
+#endif
+
+/* cpu_thread_mask_to_cores - Return a cpumask with one bit set per core
+ *                            hit by the argument
+ *
+ * @threads:	a cpumask of online threads
+ *
+ * This function returns a cpumask which has one online CPU's bit set
+ * for each core that has at least one thread set in the argument.
+ *
+ * This can typically be used for things like IPIs for TLB invalidations,
+ * since those need to be done only once per core/TLB.
+ */
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
+{
+	cpumask_t	tmp, res;
+	int		i, cpu;
+
+	cpumask_clear(&res);
+	for (i = 0; i < NR_CPUS; i += threads_per_core) {
+		cpumask_shift_left(&tmp, &threads_core_mask, i);
+		if (cpumask_intersects(threads, &tmp)) {
+			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
+			if (cpu < nr_cpu_ids)
+				cpumask_set_cpu(cpu, &res);
+		}
+	}
+	return res;
+}
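+
+/* Example: with threads_per_core == 4, an input mask containing CPUs 1 and 6
+ * yields a result with one online CPU's bit set for core 0 (CPUs 0-3) and
+ * one for core 1 (CPUs 4-7).
+ */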
+
+static inline int cpu_nr_cores(void)
+{
+	return nr_cpu_ids >> threads_shift;
+}
+
+static inline cpumask_t cpu_online_cores_map(void)
+{
+	return cpu_thread_mask_to_cores(cpu_online_mask);
+}
+
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
+
+static inline int cpu_thread_in_core(int cpu)
+{
+	return cpu & (threads_per_core - 1);
+}
+
+static inline int cpu_thread_in_subcore(int cpu)
+{
+	return cpu & (threads_per_subcore - 1);
+}
+
+static inline int cpu_first_thread_sibling(int cpu)
+{
+	return cpu & ~(threads_per_core - 1);
+}
+
+static inline int cpu_last_thread_sibling(int cpu)
+{
+	return cpu | (threads_per_core - 1);
+}
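+
+/* Example of the thread arithmetic above, assuming threads_per_core == 8:
+ * CPU 13 is thread 5 of its core (13 & 7), its first sibling is CPU 8
+ * (13 & ~7) and its last sibling is CPU 15 (13 | 7).
+ */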
+
+static inline u32 get_tensr(void)
+{
+#ifdef	CONFIG_BOOKE
+	if (cpu_has_feature(CPU_FTR_SMT))
+		return mfspr(SPRN_TENSR);
+#endif
+	return 1;
+}
+
+void book3e_start_thread(int thread, unsigned long addr);
+void book3e_stop_thread(int thread);
+
+#endif /* __ASSEMBLY__ */
+
+#define INVALID_THREAD_HWID	0x0fff
+
+#endif /* _ASM_POWERPC_CPUTHREADS_H */
+
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
new file mode 100644
index 0000000..1336727
--- /dev/null
+++ b/arch/powerpc/include/asm/cputime.h
@@ -0,0 +1,70 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
+ * the same units as the timebase.  Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
+
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
+#ifdef __KERNEL__
+/*
+ * Convert cputime <-> microseconds
+ */
+extern u64 __cputime_usec_factor;
+
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
+{
+	return mulhdu((__force u64) ct, __cputime_usec_factor);
+}
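+
+/* mulhdu() yields the high 64 bits of the unsigned 128-bit product, so the
+ * conversion above computes (ct * __cputime_usec_factor) >> 64; the factor
+ * is evidently scaled at boot so that the result comes out in microseconds.
+ */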
+
+/*
+ * PPC64 uses the PACA, which is task-independent, for storing accounting
+ * data, while PPC32 uses struct thread_info; therefore at task switch the
+ * accounting data has to be populated in the new task.
+ */
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk)	(&get_paca()->accounting)
+static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+#else
+#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+static inline void arch_vtime_task_switch(struct task_struct *prev)
+{
+	struct cpu_accounting_data *acct = get_accounting(current);
+	struct cpu_accounting_data *acct0 = get_accounting(prev);
+
+	acct->starttime = acct0->starttime;
+	acct->startspurr = acct0->startspurr;
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h
new file mode 100644
index 0000000..e2c7f06
--- /dev/null
+++ b/arch/powerpc/include/asm/current.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_CURRENT_H
+#define _ASM_POWERPC_CURRENT_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct task_struct;
+
+#ifdef __powerpc64__
+#include <linux/stddef.h>
+#include <asm/paca.h>
+
+static inline struct task_struct *get_current(void)
+{
+	struct task_struct *task;
+
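+	/* On 64-bit, r13 holds the per-CPU PACA pointer; load paca->__current. */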
+	__asm__ __volatile__("ld %0,%1(13)"
+	: "=r" (task)
+	: "i" (offsetof(struct paca_struct, __current)));
+
+	return task;
+}
+#define current	get_current()
+
+#else
+
+/*
+ * We keep `current' in r2 for speed.
+ */
+register struct task_struct *current asm ("r2");
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CURRENT_H */
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
new file mode 100644
index 0000000..4785c17
--- /dev/null
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#ifdef __KERNEL__
+#ifndef _ASM_DBDMA_H_
+#define _ASM_DBDMA_H_
+/*
+ * DBDMA control/status registers.  All little-endian.
+ */
+struct dbdma_regs {
+    unsigned int control;	/* lets you change bits in status */
+    unsigned int status;	/* DMA and device status bits (see below) */
+    unsigned int cmdptr_hi;	/* upper 32 bits of command address */
+    unsigned int cmdptr;	/* (lower 32 bits of) command address (phys) */
+    unsigned int intr_sel;	/* select interrupt condition bit */
+    unsigned int br_sel;	/* select branch condition bit */
+    unsigned int wait_sel;	/* select wait condition bit */
+    unsigned int xfer_mode;
+    unsigned int data2ptr_hi;
+    unsigned int data2ptr;
+    unsigned int res1;
+    unsigned int address_hi;
+    unsigned int br_addr_hi;
+    unsigned int res2[3];
+};
+
+/* Bits in control and status registers */
+#define RUN	0x8000
+#define PAUSE	0x4000
+#define FLUSH	0x2000
+#define WAKE	0x1000
+#define DEAD	0x0800
+#define ACTIVE	0x0400
+#define BT	0x0100
+#define DEVSTAT	0x00ff
+
+/*
+ * DBDMA command structure.  These fields are all little-endian!
+ */
+struct dbdma_cmd {
+	__le16 req_count;	/* requested byte transfer count */
+	__le16 command;		/* command word (has bit-fields) */
+	__le32 phy_addr;	/* physical data address */
+	__le32 cmd_dep;		/* command-dependent field */
+	__le16 res_count;	/* residual count after completion */
+	__le16 xfer_status;	/* transfer status */
+};
+
+/* DBDMA command values in command field */
+#define OUTPUT_MORE	0	/* transfer memory data to stream */
+#define OUTPUT_LAST	0x1000	/* ditto followed by end marker */
+#define INPUT_MORE	0x2000	/* transfer stream data to memory */
+#define INPUT_LAST	0x3000	/* ditto, expect end marker */
+#define STORE_WORD	0x4000	/* write word (4 bytes) to device reg */
+#define LOAD_WORD	0x5000	/* read word (4 bytes) from device reg */
+#define DBDMA_NOP	0x6000	/* do nothing */
+#define DBDMA_STOP	0x7000	/* suspend processing */
+
+/* Key values in command field */
+#define KEY_STREAM0	0	/* usual data stream */
+#define KEY_STREAM1	0x100	/* control/status stream */
+#define KEY_STREAM2	0x200	/* device-dependent stream */
+#define KEY_STREAM3	0x300	/* device-dependent stream */
+#define KEY_REGS	0x500	/* device register space */
+#define KEY_SYSTEM	0x600	/* system memory-mapped space */
+#define KEY_DEVICE	0x700	/* device memory-mapped space */
+
+/* Interrupt control values in command field */
+#define INTR_NEVER	0	/* don't interrupt */
+#define INTR_IFSET	0x10	/* intr if condition bit is 1 */
+#define INTR_IFCLR	0x20	/* intr if condition bit is 0 */
+#define INTR_ALWAYS	0x30	/* always interrupt */
+
+/* Branch control values in command field */
+#define BR_NEVER	0	/* don't branch */
+#define BR_IFSET	0x4	/* branch if condition bit is 1 */
+#define BR_IFCLR	0x8	/* branch if condition bit is 0 */
+#define BR_ALWAYS	0xc	/* always branch */
+
+/* Wait control values in command field */
+#define WAIT_NEVER	0	/* don't wait */
+#define WAIT_IFSET	1	/* wait if condition bit is 1 */
+#define WAIT_IFCLR	2	/* wait if condition bit is 0 */
+#define WAIT_ALWAYS	3	/* always wait */
+
+/* Align an address for a DBDMA command structure */
+#define DBDMA_ALIGN(x)	(((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \
+			 & -sizeof(struct dbdma_cmd))
+
+/* Useful macros */
+#define DBDMA_DO_STOP(regs) do {				\
+	out_le32(&((regs)->control), (RUN|FLUSH)<<16);		\
+	while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH))	\
+		; \
+} while(0)
+
+#define DBDMA_DO_RESET(regs) do {				\
+	out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\
+	while(in_le32(&((regs)->status)) & (RUN)) \
+		; \
+} while(0)
+
+#endif /* _ASM_DBDMA_H_ */
+#endif /* __KERNEL__ */
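For reference, a minimal sketch of how a driver might program a DBDMA channel with these definitions. The descriptor fields are little-endian, so cpu_to_le16()/cpu_to_le32() conversions are needed on big-endian PowerPC; the names here (setup_rx, buf_phys) are illustrative, not part of the header.

#include <linux/io.h>
#include <linux/types.h>
#include <asm/dbdma.h>

/* Build a one-shot INPUT_LAST transfer followed by a STOP command.
 * cmds should be aligned per DBDMA_ALIGN(); buf_phys is the physical
 * address of the receive buffer.
 */
static void setup_rx(volatile struct dbdma_regs __iomem *regs,
		     struct dbdma_cmd *cmds, u32 buf_phys, u16 len)
{
	cmds[0].req_count = cpu_to_le16(len);
	cmds[0].command = cpu_to_le16(INPUT_LAST | INTR_ALWAYS);
	cmds[0].phy_addr = cpu_to_le32(buf_phys);
	cmds[0].xfer_status = 0;
	cmds[1].command = cpu_to_le16(DBDMA_STOP);

	DBDMA_DO_RESET(regs);			/* quiesce the channel */
	out_le32(&regs->cmdptr, virt_to_phys(cmds));
	/* upper halfword of control selects which bits to change */
	out_le32(&regs->control, (RUN << 16) | RUN);
}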
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
new file mode 100644
index 0000000..99b84db
--- /dev/null
+++ b/arch/powerpc/include/asm/dbell.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides masks and opcode images for use by code generation and
+ * emulation, and for instructions that older assemblers might not
+ * know about.

+ */
+#ifndef _ASM_POWERPC_DBELL_H
+#define _ASM_POWERPC_DBELL_H
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/ppc-opcode.h>
+#include <asm/feature-fixups.h>
+
+#define PPC_DBELL_MSG_BRDCAST	(0x04000000)
+#define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK	PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x)	((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK	0x3fff
+enum ppc_dbell {
+	PPC_DBELL = 0,		/* doorbell */
+	PPC_DBELL_CRIT = 1,	/* critical doorbell */
+	PPC_G_DBELL = 2,	/* guest doorbell */
+	PPC_G_DBELL_CRIT = 3,	/* guest critical doorbell */
+	PPC_G_DBELL_MC = 4,	/* guest mcheck doorbell */
+	PPC_DBELL_SERVER = 5,	/* doorbell on server */
+};
+
+#ifdef CONFIG_PPC_BOOK3S
+
+#define PPC_DBELL_MSGTYPE		PPC_DBELL_SERVER
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+	__asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSND(%1), PPC_MSGSNDP(%1), %0)
+				: : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+	/* sync is not required when taking messages from the same core */
+	__asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSYNC " ; lwsync", "", %0)
+				: : "i" (CPU_FTR_HVMODE|CPU_FTR_ARCH_300));
+}
+
+static inline void _ppc_msgclr(u32 msg)
+{
+	__asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGCLR(%1), PPC_MSGCLRP(%1), %0)
+				: : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+static inline void ppc_msgclr(enum ppc_dbell type)
+{
+	u32 msg = PPC_DBELL_TYPE(type);
+
+	_ppc_msgclr(msg);
+}
+
+#else /* CONFIG_PPC_BOOK3S */
+
+#define PPC_DBELL_MSGTYPE		PPC_DBELL
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+	__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+}
+
+#endif /* CONFIG_PPC_BOOK3S */
+
+extern void doorbell_global_ipi(int cpu);
+extern void doorbell_core_ipi(int cpu);
+extern int doorbell_try_core_ipi(int cpu);
+extern void doorbell_exception(struct pt_regs *regs);
+
+static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
+{
+	u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
+			(tag & 0x07ffffff);
+
+	_ppc_msgsnd(msg);
+}
+
+#endif /* _ASM_POWERPC_DBELL_H */
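As a usage sketch, a doorbell IPI to another CPU thread can be sent with the helpers above; the tag is the target's hardware thread id, and ppc_msgsnd_sync() orders earlier stores (e.g. the IPI message word) before the msgsnd, which behaves like a store. This condenses the pattern used by doorbell_global_ipi(); bookkeeping such as the KVM host-IPI flag is omitted.

#include <asm/dbell.h>
#include <asm/smp.h>

static void send_doorbell_ipi(int cpu)
{
	u32 tag = get_hard_smp_processor_id(cpu);

	ppc_msgsnd_sync();			/* order prior stores */
	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
}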
diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h
new file mode 100644
index 0000000..35b7159
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-generic.h
@@ -0,0 +1,49 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_GENERIC_H
+#define _ASM_POWERPC_DCR_GENERIC_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID};
+
+typedef struct {
+	enum host_type_t type;
+	union {
+		dcr_host_mmio_t mmio;
+		dcr_host_native_t native;
+	} host;
+} dcr_host_t;
+
+extern bool dcr_map_ok_generic(dcr_host_t host);
+
+extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n,
+			  unsigned int dcr_c);
+extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c);
+
+extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n);
+
+extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_GENERIC_H */
+
+
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
new file mode 100644
index 0000000..93a68b2
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -0,0 +1,57 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_MMIO_H
+#define _ASM_POWERPC_DCR_MMIO_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+typedef struct {
+	void __iomem *token;
+	unsigned int stride;
+	unsigned int base;
+} dcr_host_mmio_t;
+
+static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host)
+{
+	return host.token != NULL;
+}
+
+extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
+				    unsigned int dcr_n,
+				    unsigned int dcr_c);
+extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c);
+
+static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n)
+{
+	return in_be32(host.token + ((host.base + dcr_n) * host.stride));
+}
+
+static inline void dcr_write_mmio(dcr_host_mmio_t host,
+				  unsigned int dcr_n,
+				  u32 value)
+{
+	out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_MMIO_H */
+
+
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
new file mode 100644
index 0000000..151dff5
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -0,0 +1,157 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_NATIVE_H
+#define _ASM_POWERPC_DCR_NATIVE_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/spinlock.h>
+#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
+#include <linux/stringify.h>
+
+typedef struct {
+	unsigned int base;
+} dcr_host_native_t;
+
+static inline bool dcr_map_ok_native(dcr_host_native_t host)
+{
+	return true;
+}
+
+#define dcr_map_native(dev, dcr_n, dcr_c) \
+	((dcr_host_native_t){ .base = (dcr_n) })
+#define dcr_unmap_native(host, dcr_c)		do {} while (0)
+#define dcr_read_native(host, dcr_n)		mfdcr(dcr_n + host.base)
+#define dcr_write_native(host, dcr_n, value)	mtdcr(dcr_n + host.base, value)
+
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction-based accessors. We hand-code
+ * the opcodes so as not to depend on newer binutils.
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+	unsigned int ret;
+	asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+		     : "=r" (ret) : "r" (reg));
+	return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+	asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+		     : : "r" (val), "r" (reg));
+}
+
+#define mfdcr(rn)						\
+	({unsigned int rval;					\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
+		asm volatile("mfdcr %0," __stringify(rn)	\
+		              : "=r" (rval));			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		rval = mfdcrx(rn);				\
+	else							\
+		rval = __mfdcr(rn);				\
+	rval;})
+
+#define mtdcr(rn, v)						\
+do {								\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
+		asm volatile("mtdcr " __stringify(rn) ",%0"	\
+			      : : "r" (v)); 			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		mtdcrx(rn, v);					\
+	else							\
+		__mtdcr(rn, v);					\
+} while (0)
+
+/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
+extern spinlock_t dcr_ind_lock;
+
+static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
+{
+	unsigned long flags;
+	unsigned int val;
+
+	spin_lock_irqsave(&dcr_ind_lock, flags);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = mfdcrx(base_data);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = __mfdcr(base_data);
+	}
+	spin_unlock_irqrestore(&dcr_ind_lock, flags);
+	return val;
+}
+
+static inline void __mtdcri(int base_addr, int base_data, int reg,
+			    unsigned val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcr_ind_lock, flags);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		__mtdcr(base_data, val);
+	}
+	spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+static inline void __dcri_clrset(int base_addr, int base_data, int reg,
+				 unsigned clr, unsigned set)
+{
+	unsigned long flags;
+	unsigned int val;
+
+	spin_lock_irqsave(&dcr_ind_lock, flags);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = (mfdcrx(base_data) & ~clr) | set;
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = (__mfdcr(base_data) & ~clr) | set;
+		__mtdcr(base_data, val);
+	}
+	spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+#define mfdcri(base, reg)	__mfdcri(DCRN_ ## base ## _CONFIG_ADDR,	\
+					 DCRN_ ## base ## _CONFIG_DATA,	\
+					 reg)
+
+#define mtdcri(base, reg, data)	__mtdcri(DCRN_ ## base ## _CONFIG_ADDR,	\
+					 DCRN_ ## base ## _CONFIG_DATA,	\
+					 reg, data)
+
+#define dcri_clrset(base, reg, clr, set)	__dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR,	\
+							      DCRN_ ## base ## _CONFIG_DATA,	\
+							      reg, clr, set)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_NATIVE_H */
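With the token-pasting wrappers above, a driver addresses an indirect DCR pair by its base name; mfdcri()/mtdcri()/dcri_clrset() expand DCRN_<base>_CONFIG_ADDR/_DATA and take dcr_ind_lock internally. A sketch against the SDR0 pair from dcr-regs.h (the EPS field update itself is illustrative):

#include <asm/dcr-native.h>
#include <asm/dcr-regs.h>

/* Read-modify-write the EPS field of SDR0_PFC1 via the indirect
 * SDR0 address/data pair; safe against concurrent indirect access. */
static void sdr0_set_eps(unsigned int eps)
{
	dcri_clrset(SDR0, SDR0_PFC1, SDR0_PFC1_EPS,
		    (eps << SDR0_PFC1_EPS_SHIFT) & SDR0_PFC1_EPS);
}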
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
new file mode 100644
index 0000000..5c1a497
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common DCR / SDR / CPR register definitions used on various IBM/AMCC
+ * 4xx processors
+ *
+ *    Copyright 2007 Benjamin Herrenschmidt, IBM Corp
+ *                   <benh@kernel.crashing.org>
+ *
+ * Mostly lifted from asm-ppc/ibm4xx.h by
+ *
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *
+ */
+
+#ifndef __DCR_REGS_H__
+#define __DCR_REGS_H__
+
+/*
+ * Most DCRs used for controlling devices such as the MAL, DMA engine,
+ * etc... are obtained from the device tree.
+ *
+ * The definitions in this file are fixed DCRs and indirect DCRs that
+ * are commonly used outside of specific drivers, or that refer to core
+ * common registers that may occasionally have to be tweaked outside
+ * of the driver's main register set.
+ */
+
+/* CPRs (440GX and 440SP/440SPe) */
+#define DCRN_CPR0_CONFIG_ADDR	0xc
+#define DCRN_CPR0_CONFIG_DATA	0xd
+
+/* SDRs (440GX and 440SP/440SPe) */
+#define DCRN_SDR0_CONFIG_ADDR 	0xe
+#define DCRN_SDR0_CONFIG_DATA	0xf
+
+#define SDR0_PFC0		0x4100
+#define SDR0_PFC1		0x4101
+#define SDR0_PFC1_EPS		0x1c00000
+#define SDR0_PFC1_EPS_SHIFT	22
+#define SDR0_PFC1_RMII		0x02000000
+#define SDR0_MFR		0x4300
+#define SDR0_MFR_TAH0 		0x80000000  	/* TAHOE0 Enable */
+#define SDR0_MFR_TAH1 		0x40000000  	/* TAHOE1 Enable */
+#define SDR0_MFR_PCM  		0x10000000  	/* PPC440GP irq compat mode */
+#define SDR0_MFR_ECS  		0x08000000  	/* EMAC int clk */
+#define SDR0_MFR_T0TXFL		0x00080000
+#define SDR0_MFR_T0TXFH		0x00040000
+#define SDR0_MFR_T1TXFL		0x00020000
+#define SDR0_MFR_T1TXFH		0x00010000
+#define SDR0_MFR_E0TXFL		0x00008000
+#define SDR0_MFR_E0TXFH		0x00004000
+#define SDR0_MFR_E0RXFL		0x00002000
+#define SDR0_MFR_E0RXFH		0x00001000
+#define SDR0_MFR_E1TXFL		0x00000800
+#define SDR0_MFR_E1TXFH		0x00000400
+#define SDR0_MFR_E1RXFL		0x00000200
+#define SDR0_MFR_E1RXFH		0x00000100
+#define SDR0_MFR_E2TXFL		0x00000080
+#define SDR0_MFR_E2TXFH		0x00000040
+#define SDR0_MFR_E2RXFL		0x00000020
+#define SDR0_MFR_E2RXFH		0x00000010
+#define SDR0_MFR_E3TXFL		0x00000008
+#define SDR0_MFR_E3TXFH		0x00000004
+#define SDR0_MFR_E3RXFL		0x00000002
+#define SDR0_MFR_E3RXFH		0x00000001
+#define SDR0_UART0		0x0120
+#define SDR0_UART1		0x0121
+#define SDR0_UART2		0x0122
+#define SDR0_UART3		0x0123
+#define SDR0_CUST0		0x4000
+
+/* SDR for 405EZ */
+#define DCRN_SDR_ICINTSTAT	0x4510
+#define ICINTSTAT_ICRX	0x80000000
+#define ICINTSTAT_ICTX0	0x40000000
+#define ICINTSTAT_ICTX1 0x20000000
+#define ICINTSTAT_ICTX	0x60000000
+
+/* SDRs (460EX/460GT) */
+#define SDR0_ETH_CFG		0x4103
+#define SDR0_ETH_CFG_ECS	0x00000100	/* EMAC int clk source */
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_SRAM0_SB0CR	0x00
+#define DCRN_SRAM0_SB1CR	0x01
+#define DCRN_SRAM0_SB2CR	0x02
+#define DCRN_SRAM0_SB3CR	0x03
+#define  SRAM_SBCR_BU_MASK	0x00000180
+#define  SRAM_SBCR_BS_64KB	0x00000800
+#define  SRAM_SBCR_BU_RO	0x00000080
+#define  SRAM_SBCR_BU_RW	0x00000180
+#define DCRN_SRAM0_BEAR		0x04
+#define DCRN_SRAM0_BESR0	0x05
+#define DCRN_SRAM0_BESR1	0x06
+#define DCRN_SRAM0_PMEG		0x07
+#define DCRN_SRAM0_CID		0x08
+#define DCRN_SRAM0_REVID	0x09
+#define DCRN_SRAM0_DPC		0x0a
+#define  SRAM_DPC_ENABLE	0x80000000
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the L2C controller (e.g. 0x30 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_L2C0_CFG		0x00
+#define  L2C_CFG_L2M		0x80000000
+#define  L2C_CFG_ICU		0x40000000
+#define  L2C_CFG_DCU		0x20000000
+#define  L2C_CFG_DCW_MASK	0x1e000000
+#define  L2C_CFG_TPC		0x01000000
+#define  L2C_CFG_CPC		0x00800000
+#define  L2C_CFG_FRAN		0x00200000
+#define  L2C_CFG_SS_MASK	0x00180000
+#define  L2C_CFG_SS_256		0x00000000
+#define  L2C_CFG_CPIM		0x00040000
+#define  L2C_CFG_TPIM		0x00020000
+#define  L2C_CFG_LIM		0x00010000
+#define  L2C_CFG_PMUX_MASK	0x00007000
+#define  L2C_CFG_PMUX_SNP	0x00000000
+#define  L2C_CFG_PMUX_IF	0x00001000
+#define  L2C_CFG_PMUX_DF	0x00002000
+#define  L2C_CFG_PMUX_DS	0x00003000
+#define  L2C_CFG_PMIM		0x00000800
+#define  L2C_CFG_TPEI		0x00000400
+#define  L2C_CFG_CPEI		0x00000200
+#define  L2C_CFG_NAM		0x00000100
+#define  L2C_CFG_SMCM		0x00000080
+#define  L2C_CFG_NBRM		0x00000040
+#define  L2C_CFG_RDBW		0x00000008	/* only 460EX/GT */
+#define DCRN_L2C0_CMD		0x01
+#define  L2C_CMD_CLR		0x80000000
+#define  L2C_CMD_DIAG		0x40000000
+#define  L2C_CMD_INV		0x20000000
+#define  L2C_CMD_CCP		0x10000000
+#define  L2C_CMD_CTE		0x08000000
+#define  L2C_CMD_STRC		0x04000000
+#define  L2C_CMD_STPC		0x02000000
+#define  L2C_CMD_RPMC		0x01000000
+#define  L2C_CMD_HCC		0x00800000
+#define DCRN_L2C0_ADDR		0x02
+#define DCRN_L2C0_DATA		0x03
+#define DCRN_L2C0_SR		0x04
+#define  L2C_SR_CC		0x80000000
+#define  L2C_SR_CPE		0x40000000
+#define  L2C_SR_TPE		0x20000000
+#define  L2C_SR_LRU		0x10000000
+#define  L2C_SR_PCS		0x08000000
+#define DCRN_L2C0_REVID		0x05
+#define DCRN_L2C0_SNP0		0x06
+#define DCRN_L2C0_SNP1		0x07
+#define  L2C_SNP_BA_MASK	0xffff0000
+#define  L2C_SNP_SSR_MASK	0x0000f000
+#define  L2C_SNP_SSR_32G	0x0000f000
+#define  L2C_SNP_ESR		0x00000800
+
+/*
+ * DCR register offsets for 440SP/440SPe I2O/DMA controller.
+ * The base address is configured in the device tree.
+ */
+#define DCRN_I2O0_IBAL		0x006
+#define DCRN_I2O0_IBAH		0x007
+#define I2O_REG_ENABLE		0x00000001	/* Enable I2O/DMA access */
+
+/* 440SP/440SPe Software Reset DCR */
+#define DCRN_SDR0_SRST		0x0200
+#define DCRN_SDR0_SRST_I2ODMA	(0x80000000 >> 15)	/* Reset I2O/DMA */
+
+/* 440SP/440SPe Memory Queue DCR offsets */
+#define DCRN_MQ0_XORBA		0x04
+#define DCRN_MQ0_CF2H		0x06
+#define DCRN_MQ0_CFBHL		0x0f
+#define DCRN_MQ0_BAUH		0x10
+
+/* HB/LL Paths Configuration Register */
+#define MQ0_CFBHL_TPLM		28
+#define MQ0_CFBHL_HBCL		23
+#define MQ0_CFBHL_POLY		15
+
+#endif /* __DCR_REGS_H__ */
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
new file mode 100644
index 0000000..9d6851c
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr.h
@@ -0,0 +1,78 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_H
+#define _ASM_POWERPC_DCR_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_DCR
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+#include <asm/dcr-native.h>
+#endif
+
+#ifdef CONFIG_PPC_DCR_MMIO
+#include <asm/dcr-mmio.h>
+#endif
+
+
+/* Indirection layer for providing both NATIVE and MMIO support. */
+
+#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
+
+#include <asm/dcr-generic.h>
+
+#define DCR_MAP_OK(host)	dcr_map_ok_generic(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value)
+
+#else
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+typedef dcr_host_native_t dcr_host_t;
+#define DCR_MAP_OK(host)	dcr_map_ok_native(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value)
+#else
+typedef dcr_host_mmio_t dcr_host_t;
+#define DCR_MAP_OK(host)	dcr_map_ok_mmio(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
+#endif
+
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
+
+/*
+ * Additional helpers to read the DCR base from the device tree
+ */
+struct device_node;
+extern unsigned int dcr_resource_start(const struct device_node *np,
+				       unsigned int index);
+extern unsigned int dcr_resource_len(const struct device_node *np,
+				     unsigned int index);
+#endif /* CONFIG_PPC_DCR */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_H */
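Tying the layers together, a driver typically reads its DCR range from the device tree and then uses the unified accessors, which resolve to the native, MMIO, or generic (run-time dispatched) implementation depending on the kernel configuration. A hedged sketch, with error handling abbreviated:

#include <linux/errno.h>
#include <linux/of.h>
#include <asm/dcr.h>

/* Map a device's first DCR resource and set bit 0 of register 0. */
static int example_dcr_setup(struct device_node *np)
{
	unsigned int base = dcr_resource_start(np, 0);
	unsigned int len = dcr_resource_len(np, 0);
	dcr_host_t host;

	host = dcr_map(np, base, len);
	if (!DCR_MAP_OK(host))
		return -ENODEV;

	dcr_write(host, 0, dcr_read(host, 0) | 1);

	dcr_unmap(host, len);
	return 0;
}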
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
new file mode 100644
index 0000000..7756026
--- /dev/null
+++ b/arch/powerpc/include/asm/debug.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_DEBUG_H
+#define _ASM_POWERPC_DEBUG_H
+
+#include <asm/hw_breakpoint.h>
+
+struct pt_regs;
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_break_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+	if (unlikely(__ ## __NAME)) \
+		return __ ## __NAME(regs); \
+	return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_break_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+void __set_breakpoint(struct arch_hw_breakpoint *brk);
+bool ppc_breakpoint_available(void);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+extern void do_send_trap(struct pt_regs *regs, unsigned long address,
+			 unsigned long error_code, int brkpt);
+#else
+
+extern void do_break(struct pt_regs *regs, unsigned long address,
+		     unsigned long error_code);
+#endif
+
+#endif /* _ASM_POWERPC_DEBUG_H */
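Each DEBUGGER_BOILERPLATE() invocation expands to a trivial wrapper that calls the corresponding hook pointer only if a debugger has installed one; for example, DEBUGGER_BOILERPLATE(debugger) produces:

static inline int debugger(struct pt_regs *regs)
{
	if (unlikely(__debugger))
		return __debugger(regs);
	return 0;
}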
diff --git a/arch/powerpc/include/asm/debugfs.h b/arch/powerpc/include/asm/debugfs.h
new file mode 100644
index 0000000..4f3b39f
--- /dev/null
+++ b/arch/powerpc/include/asm/debugfs.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_POWERPC_DEBUGFS_H
+#define _ASM_POWERPC_DEBUGFS_H
+
+/*
+ * Copyright 2017, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/debugfs.h>
+
+extern struct dentry *powerpc_debugfs_root;
+
+#endif /* _ASM_POWERPC_DEBUGFS_H */
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
new file mode 100644
index 0000000..3df4417
--- /dev/null
+++ b/arch/powerpc/include/asm/delay.h
@@ -0,0 +1,80 @@
+#ifndef _ASM_POWERPC_DELAY_H
+#define _ASM_POWERPC_DELAY_H
+#ifdef __KERNEL__
+
+#include <linux/processor.h>
+#include <asm/time.h>
+
+/*
+ * Copyright 1996, Paul Mackerras.
+ * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
+ * Anton Blanchard.
+ */
+
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
+
+/*
+ * On shared processor machines the generic implementation of mdelay can
+ * result in large errors. While each iteration of the loop inside mdelay
+ * is supposed to take 1ms, the hypervisor could sleep our partition for
+ * longer (e.g. 10ms). With the right timing these errors can add up.
+ *
+ * Since there is no 32bit overflow issue on 64bit kernels, just call
+ * udelay directly.
+ */
+#ifdef CONFIG_PPC64
+#define mdelay(n)	udelay((n) * 1000)
+#endif
+
+/**
+ * spin_event_timeout - spin until a condition gets true or a timeout elapses
+ * @condition: a C expression to evaluate
+ * @timeout: timeout, in microseconds
+ * @delay: the number of microseconds to delay between each evaluation of
+ *         @condition
+ *
+ * The process spins until the condition evaluates to true (non-zero) or the
+ * timeout elapses.  The return value of this macro is the value of
+ * @condition when the loop terminates. This lets you determine why the
+ * loop terminated: if the return value is zero, a timeout occurred.
+ *
+ * The primary purpose of this macro is to poll a hardware register
+ * until a status bit changes.  The timeout ensures that the loop still
+ * terminates even if the bit never changes.  The delay is for devices that
+ * need a delay in between successive reads.
+ *
+ * gcc will optimize out the if-statement if @delay is a constant.
+ */
+#define spin_event_timeout(condition, timeout, delay)                          \
+({                                                                             \
+	typeof(condition) __ret;                                               \
+	unsigned long __loops = tb_ticks_per_usec * timeout;                   \
+	unsigned long __start = get_tbl();                                     \
+                                                                               \
+	if (delay) {                                                           \
+		while (!(__ret = (condition)) &&                               \
+				(tb_ticks_since(__start) <= __loops))          \
+			udelay(delay);                                         \
+	} else {                                                               \
+		spin_begin();                                                  \
+		while (!(__ret = (condition)) &&                               \
+				(tb_ticks_since(__start) <= __loops))          \
+			spin_cpu_relax();                                      \
+		spin_end();                                                    \
+	}                                                                      \
+	if (!__ret)                                                            \
+		__ret = (condition);                                           \
+	__ret;		                                                       \
+})
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DELAY_H */
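A usage sketch of spin_event_timeout(): poll a (hypothetical) memory-mapped status register for a ready bit, waiting at most 1000 microseconds with 10 microseconds between reads. STATUS_READY and wait_for_ready() are illustrative names.

#include <linux/errno.h>
#include <linux/io.h>
#include <asm/delay.h>

#define STATUS_READY	0x1	/* hypothetical ready bit */

static int wait_for_ready(u32 __iomem *status_reg)
{
	u32 ready;

	/* the macro returns the final value of the condition */
	ready = spin_event_timeout(in_be32(status_reg) & STATUS_READY,
				   1000, 10);
	return ready ? 0 : -ETIMEDOUT;
}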
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
new file mode 100644
index 0000000..0245bfc
--- /dev/null
+++ b/arch/powerpc/include/asm/device.h
@@ -0,0 +1,59 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_POWERPC_DEVICE_H
+#define _ASM_POWERPC_DEVICE_H
+
+struct device_node;
+#ifdef CONFIG_PPC64
+struct pci_dn;
+struct iommu_table;
+#endif
+
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
+struct dev_archdata {
+	/*
+	 * These two used to be a union. However, with the hybrid ops we need
+	 * both so here we store both a DMA offset for direct mappings and
+	 * an iommu_table for remapped DMA.
+	 */
+	dma_addr_t		dma_offset;
+
+#ifdef CONFIG_PPC64
+	struct iommu_table	*iommu_table_base;
+#endif
+
+#ifdef CONFIG_IOMMU_API
+	void			*iommu_domain;
+#endif
+#ifdef CONFIG_SWIOTLB
+	dma_addr_t		max_direct_dma_addr;
+#endif
+#ifdef CONFIG_PPC64
+	struct pci_dn		*pci_data;
+#endif
+#ifdef CONFIG_EEH
+	struct eeh_dev		*edev;
+#endif
+#ifdef CONFIG_FAIL_IOMMU
+	int fail_iommu;
+#endif
+#ifdef CONFIG_CXL_BASE
+	struct cxl_context	*cxl_ctx;
+#endif
+};
+
+struct pdev_archdata {
+	u64 dma_mask;
+};
+
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
+#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
new file mode 100644
index 0000000..c0a5505
--- /dev/null
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -0,0 +1,128 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_PPC_DISASSEMBLE_H__
+#define __ASM_PPC_DISASSEMBLE_H__
+
+#include <linux/types.h>
+
+static inline unsigned int get_op(u32 inst)
+{
+	return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+	return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_tmrn(u32 inst)
+{
+	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+	return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+	return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+	return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+	return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+	return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+	return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+	return inst & 0xffff;
+}
+
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
+
+static inline unsigned int get_tx_or_sx(u32 inst)
+{
+	return (inst) & 0x1;
+}
+
+#define IS_XFORM(inst)	(get_op(inst)  == 31)
+#define IS_DSFORM(inst)	(get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+	unsigned dsisr;
+
+
+	/* bits  6:15 --> 22:31 */
+	dsisr = (instr & 0x03ff0000) >> 16;
+
+	if (IS_XFORM(instr)) {
+		/* bits 29:30 --> 15:16 */
+		dsisr |= (instr & 0x00000006) << 14;
+		/* bit     25 -->    17 */
+		dsisr |= (instr & 0x00000040) << 8;
+		/* bits 21:24 --> 18:21 */
+		dsisr |= (instr & 0x00000780) << 3;
+	} else {
+		/* bit      5 -->    17 */
+		dsisr |= (instr & 0x04000000) >> 12;
+		/* bits  1: 4 --> 18:21 */
+		dsisr |= (instr & 0x78000000) >> 17;
+		/* bits 30:31 --> 12:13 */
+		if (IS_DSFORM(instr))
+			dsisr |= (instr & 0x00000003) << 18;
+	}
+
+	return dsisr;
+}
+#endif /* __ASM_PPC_DISASSEMBLE_H__ */
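As an illustrative check of the accessors above: the word 0x80610008 encodes lwz r3,8(r1), a D-form load with primary opcode 32, and the field extractors recover each piece.

#include <asm/disassemble.h>

static void decode_example(void)
{
	u32 inst = 0x80610008;		/* lwz r3,8(r1) */

	unsigned int op = get_op(inst);	/* 32: primary opcode  */
	unsigned int rt = get_rt(inst);	/* 3:  destination GPR */
	unsigned int ra = get_ra(inst);	/* 1:  base register   */
	unsigned int d  = get_d(inst);	/* 8:  displacement    */

	(void)op; (void)rt; (void)ra; (void)d;
}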
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
new file mode 100644
index 0000000..7702875
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_POWERPC_DMA_DIRECT_H
+#define ASM_POWERPC_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+#ifdef CONFIG_SWIOTLB
+	struct dev_archdata *sd = &dev->archdata;
+
+	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+		return false;
+#endif
+
+	if (!dev->dma_mask)
+		return false;
+
+	return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr + get_dma_offset(dev);
+}
+
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr - get_dma_offset(dev);
+}
+#endif /* ASM_POWERPC_DMA_DIRECT_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
new file mode 100644
index 0000000..8fa3945
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device DMA API for powerpc,
+ * covering the PCI and VIO buses.
+ */
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+/* Some dma direct funcs must be visible for use in other dma_ops */
+extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t flag,
+					 unsigned long attrs);
+extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle,
+				       unsigned long attrs);
+extern int dma_nommu_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma,
+				    void *cpu_addr, dma_addr_t handle,
+				    size_t size, unsigned long attrs);
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * DMA-consistent mapping functions for PowerPCs that don't support
+ * cache snooping.  These allocate/free a region of uncached mapped
+ * memory space for use with DMA devices.  Alternatively, you could
+ * allocate the space "normally" and use the cache management functions
+ * to ensure it is consistent.
+ */
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp);
+extern void __dma_free_coherent(size_t size, void *vaddr);
+extern void __dma_sync(void *vaddr, size_t size, int direction);
+extern void __dma_sync_page(struct page *page, unsigned long offset,
+				 size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
+
+#else /* ! CONFIG_NOT_COHERENT_CACHE */
+/*
+ * Cache coherent cores.
+ */
+
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
+#define __dma_free_coherent(size, addr)		((void)0)
+#define __dma_sync(addr, size, rw)		((void)0)
+#define __dma_sync_page(pg, off, sz, rw)	((void)0)
+
+#endif /* ! CONFIG_NOT_COHERENT_CACHE */
+
+static inline unsigned long device_to_mask(struct device *dev)
+{
+	if (dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
+
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_map_ops dma_iommu_ops;
+#endif
+extern const struct dma_map_ops dma_nommu_ops;
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	/* We don't handle the NULL dev case for ISA for now. We could
+	 * do it via an out-of-line call, but it is not needed yet: the
+	 * only ISA DMA device we support is the floppy, and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	return NULL;
+}
+
+/*
+ * get_dma_offset()
+ *
+ * Get the dma offset on configurations where the dma address can be determined
+ * from the physical address by looking at a simple offset.  Direct dma and
+ * swiotlb use this function, but it is typically not used by implementations
+ * with an iommu.
+ */
+static inline dma_addr_t get_dma_offset(struct device *dev)
+{
+	if (dev)
+		return dev->archdata.dma_offset;
+
+	return PCI_DRAM_OFFSET;
+}
+
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+{
+	if (dev)
+		dev->archdata.dma_offset = off;
+}
+
+#define HAVE_ARCH_DMA_SET_MASK 1
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+extern u64 __dma_get_required_mask(struct device *dev);
+
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_DMA_MAPPING_H */
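The offset helpers are how platforms with a fixed CPU-to-bus translation wire up direct DMA: bus setup code records the offset per device, and __phys_to_dma()/__dma_to_phys() in dma-direct.h then apply it. A sketch, assuming a hypothetical bus whose devices see system RAM starting at bus address 0x80000000:

#include <linux/device.h>
#include <asm/dma-mapping.h>

/* Hypothetical per-device fixup run at bus probe time. */
static void example_dma_dev_setup(struct device *dev)
{
	/* dma_addr = phys_addr + 0x80000000 for this bus */
	set_dma_offset(dev, 0x80000000ul);
}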
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
new file mode 100644
index 0000000..1b4f025
--- /dev/null
+++ b/arch/powerpc/include/asm/dma.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+#ifdef __KERNEL__
+
+/*
+ * Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * Changes for ppc sound by Christoph Nadig
+ */
+
+/*
+ * Note: Adapted for PowerPC by Gary Thomas
+ * Modified by Cort Dougan <cort@cs.nmt.edu>
+ *
+ * None of this really applies for Power Macintoshes.  There is
+ * basically just enough here to get kernel/dma.c to compile.
+ */
+
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#ifndef MAX_DMA_CHANNELS
+#define MAX_DMA_CHANNELS	8
+#endif
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS		(~0UL)
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb	outb_p
+#else
+#define dma_outb	outb
+#endif
+
+#define dma_inb		inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ *  controller 1: channels 0-3, byte operations, ports 00-1F
+ *  controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ *  - ALL registers are 8 bits only, regardless of transfer size
+ *  - channel 4 is not used - cascades 1 into 2.
+ *  - channels 0-3 are byte - addresses/counts are for physical bytes
+ *  - channels 5-7 are word - addresses/counts are for physical words
+ *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ *  - transfer count loaded to registers is 1 less than actual count
+ *  - controller 2 offsets are all even (2x offsets for controller 1)
+ *  - page registers for 5-7 don't use data bit 0, represent 128K pages
+ *  - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ *  Address mapping for channels 0-3:
+ *
+ *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *   P7  ...  P0  A7 ... A0  A7 ... A0
+ * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
+ *
+ *  Address mapping for channels 5-7:
+ *
+ *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
+ *    |  ...  |   \   \   ... \  \  \  ... \  \
+ *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
+ *    |  ...  |     \   \   ... \  \  \  ... \
+ *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
+ * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG		0x08	/* command register (w) */
+#define DMA1_STAT_REG		0x08	/* status register (r) */
+#define DMA1_REQ_REG		0x09	/* request register (w) */
+#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
+#define DMA1_MODE_REG		0x0B	/* mode register (w) */
+#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
+#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
+#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
+#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */
+
+#define DMA2_CMD_REG		0xD0	/* command register (w) */
+#define DMA2_STAT_REG		0xD0	/* status register (r) */
+#define DMA2_REQ_REG		0xD2	/* request register (w) */
+#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
+#define DMA2_MODE_REG		0xD6	/* mode register (w) */
+#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
+#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
+#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
+#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */
+
+#define DMA_ADDR_0		0x00	/* DMA address registers */
+#define DMA_ADDR_1		0x02
+#define DMA_ADDR_2		0x04
+#define DMA_ADDR_3		0x06
+#define DMA_ADDR_4		0xC0
+#define DMA_ADDR_5		0xC4
+#define DMA_ADDR_6		0xC8
+#define DMA_ADDR_7		0xCC
+
+#define DMA_CNT_0		0x01	/* DMA count registers */
+#define DMA_CNT_1		0x03
+#define DMA_CNT_2		0x05
+#define DMA_CNT_3		0x07
+#define DMA_CNT_4		0xC2
+#define DMA_CNT_5		0xC6
+#define DMA_CNT_6		0xCA
+#define DMA_CNT_7		0xCE
+
+#define DMA_LO_PAGE_0		0x87	/* DMA page registers */
+#define DMA_LO_PAGE_1		0x83
+#define DMA_LO_PAGE_2		0x81
+#define DMA_LO_PAGE_3		0x82
+#define DMA_LO_PAGE_5		0x8B
+#define DMA_LO_PAGE_6		0x89
+#define DMA_LO_PAGE_7		0x8A
+
+#define DMA_HI_PAGE_0		0x487	/* DMA page registers */
+#define DMA_HI_PAGE_1		0x483
+#define DMA_HI_PAGE_2		0x481
+#define DMA_HI_PAGE_3		0x482
+#define DMA_HI_PAGE_5		0x48B
+#define DMA_HI_PAGE_6		0x489
+#define DMA_HI_PAGE_7		0x48A
+
+#define DMA1_EXT_REG		0x40B
+#define DMA2_EXT_REG		0x4D6
+
+#ifndef __powerpc64__
+    /* in arch/ppc/kernel/setup.c -- Cort */
+    extern unsigned int DMA_MODE_WRITE;
+    extern unsigned int DMA_MODE_READ;
+    extern unsigned long ISA_DMA_THRESHOLD;
+#else
+    #define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
+    #define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
+#endif
+
+#define DMA_MODE_CASCADE	0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT		0x10
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dma_spin_lock, flags);
+	return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	unsigned char ucDmaCmd = 0x00;
+
+	if (dmanr != 4) {
+		dma_outb(0, DMA2_MASK_REG);	/* This may not be enabled */
+		dma_outb(ucDmaCmd, DMA2_CMD_REG);	/* Enable group */
+	}
+	if (dmanr <= 3) {
+		dma_outb(dmanr, DMA1_MASK_REG);
+		dma_outb(ucDmaCmd, DMA1_CMD_REG);	/* Enable group */
+	} else {
+		dma_outb(dmanr & 3, DMA2_MASK_REG);
+	}
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	if (dmanr <= 3)
+		dma_outb(dmanr | 4, DMA1_MASK_REG);
+	else
+		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+	if (dmanr <= 3)
+		dma_outb(0, DMA1_CLEAR_FF_REG);
+	else
+		dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+	if (dmanr <= 3)
+		dma_outb(mode | dmanr, DMA1_MODE_REG);
+	else
+		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
+{
+	switch (dmanr) {
+	case 0:
+		dma_outb(pagenr, DMA_LO_PAGE_0);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
+		break;
+	case 1:
+		dma_outb(pagenr, DMA_LO_PAGE_1);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
+		break;
+	case 2:
+		dma_outb(pagenr, DMA_LO_PAGE_2);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
+		break;
+	case 3:
+		dma_outb(pagenr, DMA_LO_PAGE_3);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
+		break;
+	case 5:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
+		break;
+	case 6:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
+		break;
+	case 7:
+		dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+		dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
+		break;
+	}
+}
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
+{
+	if (dmanr <= 3) {
+		dma_outb(phys & 0xff,
+			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
+		dma_outb((phys >> 8) & 0xff,
+			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
+	} else {
+		dma_outb((phys >> 1) & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((phys >> 9) & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
+	}
+	set_dma_page(dmanr, phys >> 16);
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	count--;
+	if (dmanr <= 3) {
+		dma_outb(count & 0xff,
+			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+		dma_outb((count >> 8) & 0xff,
+			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+	} else {
+		dma_outb((count >> 1) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+		dma_outb((count >> 9) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+	}
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	unsigned int io_port = (dmanr <= 3)
+	    ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+	    : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
+
+	/* using short to get 16-bit wrap around */
+	unsigned short count;
+
+	count = 1 + dma_inb(io_port);
+	count += dma_inb(io_port) << 8;
+
+	return (dmanr <= 3) ? count : (count << 1);
+}
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy	(0)
+#endif
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_DMA_H */
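The canonical programming sequence for these 8237-style helpers, as used by e.g. the floppy driver: take the lock, mask the channel, clear the flip-flop, then program mode, address and count before unmasking. A sketch for a byte channel:

#include <asm/dma.h>

/* Start a single-mode read (device -> memory) of len bytes from
 * physical address phys on byte channel chan (0-3). */
static void start_isa_dma_read(unsigned int chan, unsigned int phys,
			       unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, phys);
	set_dma_count(chan, len);
	enable_dma(chan);

	release_dma_lock(flags);
}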
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
new file mode 100644
index 0000000..ce242b9
--- /dev/null
+++ b/arch/powerpc/include/asm/drmem.h
@@ -0,0 +1,102 @@
+/*
+ * drmem.h: Power specific logical memory block representation
+ *
+ * Copyright 2017 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_LMB_H
+#define _ASM_POWERPC_LMB_H
+
+struct drmem_lmb {
+	u64     base_addr;
+	u32     drc_index;
+	u32     aa_index;
+	u32     flags;
+};
+
+struct drmem_lmb_info {
+	struct drmem_lmb        *lmbs;
+	int                     n_lmbs;
+	u32                     lmb_size;
+};
+
+extern struct drmem_lmb_info *drmem_info;
+
+#define for_each_drmem_lmb_in_range(lmb, start, end)		\
+	for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+
+#define for_each_drmem_lmb(lmb)					\
+	for_each_drmem_lmb_in_range((lmb),			\
+		&drmem_info->lmbs[0],				\
+		&drmem_info->lmbs[drmem_info->n_lmbs - 1])
+
+/*
+ * The of_drconf_cell_v1 struct defines the layout of the LMB data
+ * specified in the ibm,dynamic-memory device tree property.
+ * The property itself is a 32-bit value specifying the number of
+ * LMBs followed by an array of of_drconf_cell_v1 entries, one
+ * per LMB.
+ */
+struct of_drconf_cell_v1 {
+	__be64	base_addr;
+	__be32	drc_index;
+	__be32	reserved;
+	__be32	aa_index;
+	__be32	flags;
+};
+
+/*
+ * Version 2 of the ibm,dynamic-memory property is defined as a
+ * 32-bit value specifying the number of LMB sets followed by an
+ * array of of_drconf_cell_v2 entries, one per LMB set.
+ */
+struct of_drconf_cell_v2 {
+	u32	seq_lmbs;
+	u64	base_addr;
+	u32	drc_index;
+	u32	aa_index;
+	u32	flags;
+} __packed;
+
+#define DRCONF_MEM_ASSIGNED	0x00000008
+#define DRCONF_MEM_AI_INVALID	0x00000040
+#define DRCONF_MEM_RESERVED	0x00000080
+
+static inline u32 drmem_lmb_size(void)
+{
+	return drmem_info->lmb_size;
+}
+
+#define DRMEM_LMB_RESERVED	0x80000000
+
+static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
+{
+	lmb->flags |= DRMEM_LMB_RESERVED;
+}
+
+static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
+{
+	lmb->flags &= ~DRMEM_LMB_RESERVED;
+}
+
+static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
+{
+	return lmb->flags & DRMEM_LMB_RESERVED;
+}
+
+u64 drmem_lmb_memory_max(void);
+void __init walk_drmem_lmbs(struct device_node *dn,
+			void (*func)(struct drmem_lmb *, const __be32 **));
+int drmem_update_dt(void);
+
+#ifdef CONFIG_PPC_PSERIES
+void __init walk_drmem_lmbs_early(unsigned long node,
+			void (*func)(struct drmem_lmb *, const __be32 **));
+#endif
+
+#endif /* _ASM_POWERPC_LMB_H */
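A sketch of walking the LMB array with the iterator above: count the LMBs the platform has assigned (DRCONF_MEM_ASSIGNED), skipping any marked reserved by an in-flight operation.

#include <asm/drmem.h>

static int count_assigned_lmbs(void)
{
	struct drmem_lmb *lmb;
	int count = 0;

	for_each_drmem_lmb(lmb) {
		if (drmem_lmb_reserved(lmb))
			continue;
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			count++;
	}

	return count;
}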
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 0000000..0c729e2
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ *  Copyright 2017, IBM Corporation
+ *  cpufeatures is the new way to discover CPU features, via the
+ *  /cpus/features device tree node. This supersedes PVR-based
+ *  discovery ("cputable") and older device tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
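Early boot probes the new binding first and falls back to PVR-based cputable matching when it is absent. A condensed sketch of that decision; identify_cpu_by_pvr() is a hypothetical stand-in for the cputable path.

#include <linux/init.h>
#include <asm/dt_cpu_ftrs.h>

/* hypothetical cputable-based fallback, defined elsewhere */
void identify_cpu_by_pvr(void);

static void __init example_cpu_feature_discovery(void *fdt)
{
	if (dt_cpu_ftrs_init(fdt))
		return;			/* /cpus/features was present */

	identify_cpu_by_pvr();		/* hypothetical fallback */
}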
diff --git a/arch/powerpc/include/asm/edac.h b/arch/powerpc/include/asm/edac.h
new file mode 100644
index 0000000..5571e23
--- /dev/null
+++ b/arch/powerpc/include/asm/edac.h
@@ -0,0 +1,40 @@
+/*
+ * PPC EDAC common defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch edac_atomic_scrub() that EDAC uses for software
+ * ECC scrubbing.  It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static __inline__ void edac_atomic_scrub(void *va, u32 size)
+{
+	unsigned int *virt_addr = va;
+	unsigned int temp;
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__ ("\n\
+				1:	lwarx	%0,0,%1\n\
+					stwcx.	%0,0,%1\n\
+					bne-	1b\n\
+					isync"
+					: "=&r"(temp)
+					: "r"(virt_addr)
+					: "cr0", "memory");
+	}
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
new file mode 100644
index 0000000..219637e
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
+ * Copyright 2001-2012 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+
+#include <uapi/asm/eeh.h>
+
+struct pci_dev;
+struct pci_bus;
+struct pci_dn;
+
+#ifdef CONFIG_EEH
+
+/* EEH subsystem flags */
+#define EEH_ENABLED		0x01	/* EEH enabled			     */
+#define EEH_FORCE_DISABLED	0x02	/* EEH disabled			     */
+#define EEH_PROBE_MODE_DEV	0x04	/* From PCI device		     */
+#define EEH_PROBE_MODE_DEVTREE	0x08	/* From device tree		     */
+#define EEH_VALID_PE_ZERO	0x10	/* PE#0 is valid		     */
+#define EEH_ENABLE_IO_FOR_LOG	0x20	/* Enable IO for log		     */
+#define EEH_EARLY_DUMP_LOG	0x40	/* Dump log immediately		     */
+#define EEH_POSTPONED_PROBE	0x80    /* Powernv may postpone device probe */
+
+/*
+ * Delays for PE reset, all in milliseconds.
+ *
+ * The PCI specification requires a reset hold time of 100ms;
+ * we use 250ms here. The PCI bus settle time is specified as
+ * 1.5 seconds; we allow 1.8 seconds.
+ */
+#define EEH_PE_RST_HOLD_TIME		250
+#define EEH_PE_RST_SETTLE_TIME		1800
+
+/*
+ * The struct is used to trace PE-related EEH functionality.
+ * In theory, there will be one instance of the struct created
+ * for each PE. In practice, PEs correlate with each other, and
+ * the struct has to reflect that hierarchy so that the affected
+ * PEs can easily be picked up when one particular PE has EEH
+ * errors.
+ *
+ * Also, one particular PE might be composed of a PCI device, a
+ * PCI bus and their subordinate components. The struct needs to
+ * carry that information as well. Furthermore, one particular PE
+ * is only meaningful within its corresponding PHB. Therefore, the
+ * root PEs should be created against the existing PHBs in
+ * one-to-one fashion.
+ */
+#define EEH_PE_INVALID	(1 << 0)	/* Invalid   */
+#define EEH_PE_PHB	(1 << 1)	/* PHB PE    */
+#define EEH_PE_DEVICE	(1 << 2)	/* Device PE */
+#define EEH_PE_BUS	(1 << 3)	/* Bus PE    */
+#define EEH_PE_VF	(1 << 4)	/* VF PE     */
+
+#define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE		*/
+#define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE	*/
+#define EEH_PE_CFG_BLOCKED	(1 << 2)	/* Block config access	*/
+#define EEH_PE_RESET		(1 << 3)	/* PE reset in progress */
+
+#define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
+#define EEH_PE_CFG_RESTRICTED	(1 << 9)	/* Block config on error */
+#define EEH_PE_REMOVED		(1 << 10)	/* Removed permanently	*/
+#define EEH_PE_PRI_BUS		(1 << 11)	/* Cached primary bus   */
+
+struct eeh_pe {
+	int type;			/* PE type: PHB/Bus/Device	*/
+	int state;			/* PE EEH dependent mode	*/
+	int config_addr;		/* Traditional PCI address	*/
+	int addr;			/* PE configuration address	*/
+	struct pci_controller *phb;	/* Associated PHB		*/
+	struct pci_bus *bus;		/* Top PCI bus for bus PE	*/
+	int check_count;		/* Times of ignored error	*/
+	int freeze_count;		/* Times of froze up		*/
+	time64_t tstamp;		/* Time on first-time freeze	*/
+	int false_positives;		/* Times of reported #ff's	*/
+	atomic_t pass_dev_cnt;		/* Count of passed through devs	*/
+	struct eeh_pe *parent;		/* Parent PE			*/
+	void *data;			/* PE auxiliary data		*/
+	struct list_head child_list;	/* Link PE to the child list	*/
+	struct list_head edevs;		/* Link list of EEH devices	*/
+	struct list_head child;		/* Child PEs			*/
+};
+
+#define eeh_pe_for_each_dev(pe, edev, tmp) \
+		list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
+
+#define eeh_for_each_pe(root, pe) \
+	for (pe = root; pe; pe = eeh_pe_next(pe, root))
+
+static inline bool eeh_pe_passed(struct eeh_pe *pe)
+{
+	return pe ? !!atomic_read(&pe->pass_dev_cnt) : false;
+}
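+
+/*
+ * Illustrative traversal (a sketch using the macros above; "root" and
+ * "count" are assumed locals): walk a PE subtree and every EEH device
+ * attached to each PE:
+ *
+ *	struct eeh_pe *pe;
+ *	struct eeh_dev *edev, *tmp;
+ *
+ *	eeh_for_each_pe(root, pe)
+ *		eeh_pe_for_each_dev(pe, edev, tmp)
+ *			if (edev->pdev)
+ *				count++;
+ */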
+
+/*
+ * The struct is used to trace the EEH state of the associated
+ * PCI device node or PCI device. In future it might represent
+ * a PE as well, so that the EEH devices can form another tree
+ * in addition to the existing tree of PCI buses and PCI devices.
+ */
+#define EEH_DEV_BRIDGE		(1 << 0)	/* PCI bridge		*/
+#define EEH_DEV_ROOT_PORT	(1 << 1)	/* PCIe root port	*/
+#define EEH_DEV_DS_PORT		(1 << 2)	/* Downstream port	*/
+#define EEH_DEV_IRQ_DISABLED	(1 << 3)	/* Interrupt disabled	*/
+#define EEH_DEV_DISCONNECTED	(1 << 4)	/* Removing from PE	*/
+
+#define EEH_DEV_NO_HANDLER	(1 << 8)	/* No error handler	*/
+#define EEH_DEV_SYSFS		(1 << 9)	/* Sysfs created	*/
+#define EEH_DEV_REMOVED		(1 << 10)	/* Removed permanently	*/
+
+struct eeh_dev {
+	int mode;			/* EEH mode			*/
+	int class_code;			/* Class code of the device	*/
+	int pe_config_addr;		/* PE config address		*/
+	u32 config_space[16];		/* Saved PCI config space	*/
+	int pcix_cap;			/* Saved PCIx capability	*/
+	int pcie_cap;			/* Saved PCIe capability	*/
+	int aer_cap;			/* Saved AER capability		*/
+	int af_cap;			/* Saved AF capability		*/
+	struct eeh_pe *pe;		/* Associated PE		*/
+	struct list_head list;		/* Form link list in the PE	*/
+	struct list_head rmv_list;	/* Record the removed edevs	*/
+	struct pci_dn *pdn;		/* Associated PCI device node	*/
+	struct pci_dev *pdev;		/* Associated PCI device	*/
+	bool in_error;			/* Error flag for edev		*/
+	struct pci_dev *physfn;		/* Associated SRIOV PF		*/
+	struct pci_bus *bus;		/* PCI bus for partial hotplug	*/
+};
+
+static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
+{
+	return edev ? edev->pdn : NULL;
+}
+
+static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
+{
+	return edev ? edev->pdev : NULL;
+}
+
+static inline struct eeh_pe *eeh_dev_to_pe(struct eeh_dev *edev)
+{
+	return edev ? edev->pe : NULL;
+}
+
+/* Return values from eeh_ops::next_error */
+enum {
+	EEH_NEXT_ERR_NONE = 0,
+	EEH_NEXT_ERR_INF,
+	EEH_NEXT_ERR_FROZEN_PE,
+	EEH_NEXT_ERR_FENCED_PHB,
+	EEH_NEXT_ERR_DEAD_PHB,
+	EEH_NEXT_ERR_DEAD_IOC
+};
+
+/*
+ * The struct is used to track the registered EEH operation
+ * callback functions. Those operation callback functions are
+ * heavily platform dependent, so the platform should register
+ * its own EEH operation callbacks before any further EEH
+ * operations take place.
+ */
+#define EEH_OPT_DISABLE		0	/* EEH disable	*/
+#define EEH_OPT_ENABLE		1	/* EEH enable	*/
+#define EEH_OPT_THAW_MMIO	2	/* MMIO enable	*/
+#define EEH_OPT_THAW_DMA	3	/* DMA enable	*/
+#define EEH_OPT_FREEZE_PE	4	/* Freeze PE	*/
+#define EEH_STATE_UNAVAILABLE	(1 << 0)	/* State unavailable	*/
+#define EEH_STATE_NOT_SUPPORT	(1 << 1)	/* EEH not supported	*/
+#define EEH_STATE_RESET_ACTIVE	(1 << 2)	/* Active reset		*/
+#define EEH_STATE_MMIO_ACTIVE	(1 << 3)	/* Active MMIO		*/
+#define EEH_STATE_DMA_ACTIVE	(1 << 4)	/* Active DMA		*/
+#define EEH_STATE_MMIO_ENABLED	(1 << 5)	/* MMIO enabled		*/
+#define EEH_STATE_DMA_ENABLED	(1 << 6)	/* DMA enabled		*/
+#define EEH_RESET_DEACTIVATE	0	/* Deactivate the PE reset	*/
+#define EEH_RESET_HOT		1	/* Hot reset			*/
+#define EEH_RESET_FUNDAMENTAL	3	/* Fundamental reset		*/
+#define EEH_LOG_TEMP		1	/* EEH temporary error log	*/
+#define EEH_LOG_PERM		2	/* EEH permanent error log	*/
+
+struct eeh_ops {
+	char *name;
+	int (*init)(void);
+	void* (*probe)(struct pci_dn *pdn, void *data);
+	int (*set_option)(struct eeh_pe *pe, int option);
+	int (*get_pe_addr)(struct eeh_pe *pe);
+	int (*get_state)(struct eeh_pe *pe, int *state);
+	int (*reset)(struct eeh_pe *pe, int option);
+	int (*wait_state)(struct eeh_pe *pe, int max_wait);
+	int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
+	int (*configure_bridge)(struct eeh_pe *pe);
+	int (*err_inject)(struct eeh_pe *pe, int type, int func,
+			  unsigned long addr, unsigned long mask);
+	int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
+	int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
+	int (*next_error)(struct eeh_pe **pe);
+	int (*restore_config)(struct pci_dn *pdn);
+	int (*notify_resume)(struct pci_dn *pdn);
+};
+
+extern int eeh_subsystem_flags;
+extern int eeh_max_freezes;
+extern struct eeh_ops *eeh_ops;
+extern raw_spinlock_t confirm_error_lock;
+
+static inline void eeh_add_flag(int flag)
+{
+	eeh_subsystem_flags |= flag;
+}
+
+static inline void eeh_clear_flag(int flag)
+{
+	eeh_subsystem_flags &= ~flag;
+}
+
+static inline bool eeh_has_flag(int flag)
+{
+	return !!(eeh_subsystem_flags & flag);
+}
+
+static inline bool eeh_enabled(void)
+{
+	if (eeh_has_flag(EEH_FORCE_DISABLED) ||
+	    !eeh_has_flag(EEH_ENABLED))
+		return false;
+
+	return true;
+}
+
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
+static inline bool eeh_state_active(int state)
+{
+	return (state & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
+		== (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+}
+
+typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
+typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag);
+void eeh_set_pe_aux_size(int size);
+int eeh_phb_pe_create(struct pci_controller *phb);
+struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
+struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root);
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
+			  int pe_no, int config_addr);
+int eeh_add_to_parent_pe(struct eeh_dev *edev);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
+void eeh_pe_update_time_stamp(struct eeh_pe *pe);
+void *eeh_pe_traverse(struct eeh_pe *root,
+		      eeh_pe_traverse_func fn, void *flag);
+void *eeh_pe_dev_traverse(struct eeh_pe *root,
+			  eeh_edev_traverse_func fn, void *flag);
+void eeh_pe_restore_bars(struct eeh_pe *pe);
+const char *eeh_pe_loc_get(struct eeh_pe *pe);
+struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
+
+struct eeh_dev *eeh_dev_init(struct pci_dn *pdn);
+void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
+void eeh_probe_devices(void);
+int __init eeh_ops_register(struct eeh_ops *ops);
+int __exit eeh_ops_unregister(const char *name);
+int eeh_check_failure(const volatile void __iomem *token);
+int eeh_dev_check_failure(struct eeh_dev *edev);
+void eeh_addr_cache_build(void);
+void eeh_add_device_early(struct pci_dn *);
+void eeh_add_device_tree_early(struct pci_dn *);
+void eeh_add_device_late(struct pci_dev *);
+void eeh_add_device_tree_late(struct pci_bus *);
+void eeh_add_sysfs_files(struct pci_bus *);
+void eeh_remove_device(struct pci_dev *);
+int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state);
+int eeh_pe_reset_and_recover(struct eeh_pe *pe);
+int eeh_dev_open(struct pci_dev *pdev);
+void eeh_dev_release(struct pci_dev *pdev);
+struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group);
+int eeh_pe_set_option(struct eeh_pe *pe, int option);
+int eeh_pe_get_state(struct eeh_pe *pe);
+int eeh_pe_reset(struct eeh_pe *pe, int option);
+int eeh_pe_configure(struct eeh_pe *pe);
+int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
+		      unsigned long addr, unsigned long mask);
+int eeh_restore_vf_config(struct pci_dn *pdn);
+
+/**
+ * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
+ *
+ * If this macro yields TRUE, the caller relays to eeh_check_failure()
+ * which does further tests out of line.
+ */
+#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_enabled())
+
+/*
+ * Reads from a device which has been isolated by EEH will return
+ * all 1s.  This macro gives an all-1s value of the given size (in
+ * bytes: 1, 2, or 4) for comparing with the result of a read.
+ */
+#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))
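+
+/*
+ * Illustrative check (a sketch; "regs", "STATUS_REG" and "edev" are
+ * assumed driver state): a driver that reads back all 1s can ask EEH
+ * whether the PE is really frozen before trusting the value:
+ *
+ *	u32 status = readl(regs + STATUS_REG);
+ *
+ *	if (status == EEH_IO_ERROR_VALUE(4) && eeh_dev_check_failure(edev))
+ *		return -EIO;
+ */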
+
+#else /* !CONFIG_EEH */
+
+static inline bool eeh_enabled(void)
+{
+	return false;
+}
+
+static inline void eeh_probe_devices(void) { }
+
+static inline struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
+{
+	return NULL;
+}
+
+static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
+
+static inline int eeh_check_failure(const volatile void __iomem *token)
+{
+	return 0;
+}
+
+#define eeh_dev_check_failure(x) (0)
+
+static inline void eeh_addr_cache_build(void) { }
+
+static inline void eeh_add_device_early(struct pci_dn *pdn) { }
+
+static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
+
+static inline void eeh_add_device_late(struct pci_dev *dev) { }
+
+static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
+static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+
+static inline void eeh_remove_device(struct pci_dev *dev) { }
+
+#define EEH_POSSIBLE_ERROR(val, type) (0)
+#define EEH_IO_ERROR_VALUE(size) (-1UL)
+#endif /* CONFIG_EEH */
+
+#ifdef CONFIG_PPC64
+/*
+ * MMIO read/write operations with EEH support.
+ */
+static inline u8 eeh_readb(const volatile void __iomem *addr)
+{
+	u8 val = in_8(addr);
+	if (EEH_POSSIBLE_ERROR(val, u8))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u16 eeh_readw(const volatile void __iomem *addr)
+{
+	u16 val = in_le16(addr);
+	if (EEH_POSSIBLE_ERROR(val, u16))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u32 eeh_readl(const volatile void __iomem *addr)
+{
+	u32 val = in_le32(addr);
+	if (EEH_POSSIBLE_ERROR(val, u32))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u64 eeh_readq(const volatile void __iomem *addr)
+{
+	u64 val = in_le64(addr);
+	if (EEH_POSSIBLE_ERROR(val, u64))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
+{
+	u16 val = in_be16(addr);
+	if (EEH_POSSIBLE_ERROR(val, u16))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
+{
+	u32 val = in_be32(addr);
+	if (EEH_POSSIBLE_ERROR(val, u32))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
+{
+	u64 val = in_be64(addr);
+	if (EEH_POSSIBLE_ERROR(val, u64))
+		eeh_check_failure(addr);
+	return val;
+}
+
+static inline void eeh_memcpy_fromio(void *dest,
+				     const volatile void __iomem *src,
+				     unsigned long n)
+{
+	_memcpy_fromio(dest, src, n);
+
+	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
+	 * were copied. Check all four bytes.
+	 */
+	if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+		eeh_check_failure(src);
+}
+
+/* in-string eeh macros */
+static inline void eeh_readsb(const volatile void __iomem *addr, void *buf,
+			      int ns)
+{
+	_insb(addr, buf, ns);
+	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
+		eeh_check_failure(addr);
+}
+
+static inline void eeh_readsw(const volatile void __iomem *addr, void *buf,
+			      int ns)
+{
+	_insw(addr, buf, ns);
+	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
+		eeh_check_failure(addr);
+}
+
+static inline void eeh_readsl(const volatile void __iomem *addr, void *buf,
+			      int nl)
+{
+	_insl(addr, buf, nl);
+	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
+		eeh_check_failure(addr);
+}
+
+#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_EEH_H */
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
new file mode 100644
index 0000000..9884e87
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#ifndef ASM_POWERPC_EEH_EVENT_H
+#define ASM_POWERPC_EEH_EVENT_H
+#ifdef __KERNEL__
+
+/*
+ * Structure holding PCI controller data that describes a
+ * change in the isolation status of a PCI slot.  A pointer
+ * to this struct is passed as the data pointer in a notify
+ * callback.
+ */
+struct eeh_event {
+	struct list_head	list;	/* to form event queue	*/
+	struct eeh_pe		*pe;	/* EEH PE		*/
+};
+
+int eeh_event_init(void);
+int eeh_send_failure_event(struct eeh_pe *pe);
+void eeh_remove_event(struct eeh_pe *pe, bool force);
+void eeh_handle_normal_event(struct eeh_pe *pe);
+void eeh_handle_special_event(void);
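+
+/*
+ * Typical event flow (a sketch inferred from the declarations above):
+ * the detection path queues an event for the isolated PE from atomic
+ * context, and the EEH event thread later dequeues it and drives the
+ * recovery from process context:
+ *
+ *	eeh_send_failure_event(pe);	producer side
+ *	...
+ *	eeh_handle_normal_event(pe);	consumer side
+ */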
+
+#endif /* __KERNEL__ */
+#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 0000000..dc7d48e
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
+/*
+ * EHV_PIC private definitions and structure.
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __EHV_PIC_H__
+#define __EHV_PIC_H__
+
+#include <linux/irq.h>
+
+#define NR_EHV_PIC_INTS 1024
+
+#define EHV_PIC_INFO(name) EHV_PIC_##name
+
+#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
+#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
+#define EHV_PIC_VECPRI_SENSE_EDGE 0
+#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
+#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
+#define EHV_PIC_VECPRI_SENSE_MASK 0x2
+
+struct ehv_pic {
+	/* The remapper for this EHV_PIC */
+	struct irq_domain	*irqhost;
+
+	/* The "linux" controller struct */
+	struct irq_chip	hc_irq;
+
+	/* core int flag */
+	int coreint_flag;
+};
+
+void ehv_pic_init(void);
+unsigned int ehv_pic_get_irq(void);
+
+#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
new file mode 100644
index 0000000..548d9a4
--- /dev/null
+++ b/arch/powerpc/include/asm/elf.h
@@ -0,0 +1,182 @@
+/*
+ * ELF register definitions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
+
+#include <linux/sched.h>	/* for task_struct */
+#include <asm/page.h>
+#include <asm/string.h>
+#include <uapi/asm/elf.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+#define compat_elf_check_arch(x)	((x)->e_machine == EM_PPC)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_32bit_task() ? 0x000400000UL : \
+						   0x100000000UL)
+
+#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
+/*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+ * process or 64 bit, on either a 64 bit or 32 bit kernel.
+ *
+ * This macro relies on elf_regs[i] having the right type to truncate to,
+ * either u32 or u64.  It defines the body of the elf_core_copy_regs
+ * function, either the native one with elf_gregset_t elf_regs or
+ * the 32-bit one with elf_gregset_t32 elf_regs.
+ */
+#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \
+	int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \
+			   (size_t)ELF_NGREG);			  \
+	for (i = 0; i < nregs; i++) \
+		elf_regs[i] = ((unsigned long *) regs)[i]; \
+	memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0]))
+
+/* Common routine for both 32-bit and 64-bit native processes */
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
+					  struct pt_regs *regs)
+{
+	PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
+}
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
+
+typedef elf_vrregset_t elf_fpxregset_t;
+
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
+   instruction set this cpu supports.  This could be done in userspace,
+   but it's not easy, and we've already done it here.  */
+# define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP2	(cur_cpu_spec->cpu_user_features2)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.  */
+
+#define ELF_PLATFORM	(cur_cpu_spec->platform)
+
+/* While ELF_PLATFORM indicates the ISA supported by the platform, it
+ * may not accurately reflect the underlying behavior of the hardware
+ * (as in the case of running in Power5+ compatibility mode on a
+ * Power6 machine).  ELF_BASE_PLATFORM allows ld.so to load libraries
+ * that are tuned for the real hardware.
+ */
+#define ELF_BASE_PLATFORM (powerpc_base_platform)
+
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr)	do {	\
+	_r->gpr[2] = load_addr; 		\
+} while (0)
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex)					\
+do {								\
+	if (((ex).e_flags & 0x3) == 2)				\
+		set_thread_flag(TIF_ELF2ABI);			\
+	else							\
+		clear_thread_flag(TIF_ELF2ABI);			\
+	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
+		set_thread_flag(TIF_32BIT);			\
+	else							\
+		clear_thread_flag(TIF_32BIT);			\
+	if (personality(current->personality) != PER_LINUX32)	\
+		set_personality(PER_LINUX |			\
+			(current->personality & (~PER_MASK)));	\
+} while (0)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically. This
+ * is only required to work around bugs in old 32-bit toolchains. Since
+ * the 64-bit ABI has never had these issues, don't enable the workaround
+ * even if we have an executable stack.
+ */
+# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
+		(exec_stk == EXSTACK_DEFAULT) : 0)
+#else
+# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
+#endif /* __powerpc64__ */
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int uses_interp);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
+
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (is_32bit_task() ? \
+	(0x7ff >> (PAGE_SHIFT - 12)) : \
+	(0x3ffff >> (PAGE_SHIFT - 12)))
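+/* e.g. with 4K pages: 0x7ff pages ~= 8MB and 0x3ffff pages ~= 1GB of entropy */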
+
+#ifdef CONFIG_SPU_BASE
+/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+#define NT_SPU		1
+
+#define ARCH_HAVE_EXTRA_ELF_NOTES
+
+#endif /* CONFIG_SPU_BASE */
+
+#ifdef CONFIG_PPC64
+
+#define get_cache_geometry(level) \
+	(ppc64_caches.level.assoc << 16 | ppc64_caches.level.line_size)
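+/* e.g. an 8-way cache with 128-byte lines encodes as (8 << 16) | 128 = 0x00080080 */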
+
+#define ARCH_DLINFO_CACHE_GEOMETRY					\
+	NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);		\
+	NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));	\
+	NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);		\
+	NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));	\
+	NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);		\
+	NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));	\
+	NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);		\
+	NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, get_cache_geometry(l3))
+
+#else
+#define ARCH_DLINFO_CACHE_GEOMETRY
+#endif
+
+/*
+ * The requirements here are:
+ * - keep the final alignment of sp (sp & 0xf)
+ * - make sure the 32-bit value at the first 16 byte aligned position of
+ *   AUXV is greater than 16 for glibc compatibility.
+ *   AT_IGNOREPPC is used for that.
+ * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
+ *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
+ * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
+ */
+#define ARCH_DLINFO							\
+do {									\
+	/* Handle glibc compatibility. */				\
+	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
+	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
+	/* Cache size items */						\
+	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
+	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
+	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base);	\
+	ARCH_DLINFO_CACHE_GEOMETRY;					\
+} while (0)
+
+#endif /* _ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/include/asm/emergency-restart.h b/arch/powerpc/include/asm/emergency-restart.h
new file mode 100644
index 0000000..3711bd9
--- /dev/null
+++ b/arch/powerpc/include/asm/emergency-restart.h
@@ -0,0 +1 @@
+#include <asm-generic/emergency-restart.h>
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
new file mode 100644
index 0000000..651e135
--- /dev/null
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright 2007 Sony Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.
+ *  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_POWERPC_EMULATED_OPS_H
+#define _ASM_POWERPC_EMULATED_OPS_H
+
+#include <linux/atomic.h>
+#include <linux/perf_event.h>
+
+
+#ifdef CONFIG_PPC_EMULATED_STATS
+
+struct ppc_emulated_entry {
+	const char *name;
+	atomic_t val;
+};
+
+extern struct ppc_emulated {
+#ifdef CONFIG_ALTIVEC
+	struct ppc_emulated_entry altivec;
+#endif
+	struct ppc_emulated_entry dcba;
+	struct ppc_emulated_entry dcbz;
+	struct ppc_emulated_entry fp_pair;
+	struct ppc_emulated_entry isel;
+	struct ppc_emulated_entry mcrxr;
+	struct ppc_emulated_entry mfpvr;
+	struct ppc_emulated_entry multiple;
+	struct ppc_emulated_entry popcntb;
+	struct ppc_emulated_entry spe;
+	struct ppc_emulated_entry string;
+	struct ppc_emulated_entry sync;
+	struct ppc_emulated_entry unaligned;
+#ifdef CONFIG_MATH_EMULATION
+	struct ppc_emulated_entry math;
+#endif
+#ifdef CONFIG_VSX
+	struct ppc_emulated_entry vsx;
+#endif
+#ifdef CONFIG_PPC64
+	struct ppc_emulated_entry mfdscr;
+	struct ppc_emulated_entry mtdscr;
+	struct ppc_emulated_entry lq_stq;
+	struct ppc_emulated_entry lxvw4x;
+	struct ppc_emulated_entry lxvh8x;
+	struct ppc_emulated_entry lxvd2x;
+	struct ppc_emulated_entry lxvb16x;
+#endif
+} ppc_emulated;
+
+extern u32 ppc_warn_emulated;
+
+extern void ppc_warn_emulated_print(const char *type);
+
+#define __PPC_WARN_EMULATED(type)					 \
+	do {								 \
+		atomic_inc(&ppc_emulated.type.val);			 \
+		if (ppc_warn_emulated)					 \
+			ppc_warn_emulated_print(ppc_emulated.type.name); \
+	} while (0)
+
+#else /* !CONFIG_PPC_EMULATED_STATS */
+
+#define __PPC_WARN_EMULATED(type)	do { } while (0)
+
+#endif /* !CONFIG_PPC_EMULATED_STATS */
+
+#define PPC_WARN_EMULATED(type, regs)					\
+	do {								\
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
+			1, regs, 0);					\
+		__PPC_WARN_EMULATED(type);				\
+	} while (0)
+
+#define PPC_WARN_ALIGNMENT(type, regs)					\
+	do {								\
+		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,		\
+			1, regs, regs->dar);				\
+		__PPC_WARN_EMULATED(type);				\
+	} while (0)
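+
+/*
+ * Illustrative use (a sketch): an emulation path bumps the matching
+ * counter and emits a perf software event before doing the emulation,
+ * e.g. once an mcrxr instruction has been decoded:
+ *
+ *	PPC_WARN_EMULATED(mcrxr, regs);
+ *	... emulate mcrxr, then continue ...
+ */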
+
+#endif /* _ASM_POWERPC_EMULATED_OPS_H */
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 0000000..d3a7e36
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,575 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* A "hypercall" is an "sc 1" instruction.  This header file file provides C
+ * wrapper functions for the ePAPR hypervisor interface.  It is inteded
+ * for use by Linux device drivers and other operating systems.
+ *
+ * The hypercalls are implemented as inline assembly, rather than assembly
+ * language functions in a .S file, for optimization.  It allows
+ * the caller to issue the hypercall instruction directly, improving both
+ * performance and memory footprint.
+ */
+
+#ifndef _EPAPR_HCALLS_H
+#define _EPAPR_HCALLS_H
+
+#include <uapi/asm/epapr_hcalls.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+/*
+ * Hypercall register clobber list
+ *
+ * These macros are used to define the list of clobbered registers during a
+ * hypercall.  Technically, registers r0 and r3-r12 are always clobbered,
+ * but the gcc inline assembly syntax does not allow us to specify registers
+ * on the clobber list that are also on the input/output list.  Therefore,
+ * the list of clobbered registers depends on the number of register
+ * parameters ("+r" and "=r") passed to the hypercall.
+ *
+ * Each assembly block should use one of the HCALL_CLOBBERSx macros.  As a
+ * general rule, 'x' is the number of parameters passed to the assembly
+ * block *except* for r11.
+ *
+ * If you're not sure, just use the smallest value of 'x' that does not
+ * generate a compilation error.  Because these are static inline functions,
+ * the compiler will only check the clobber list for a function if you
+ * compile code that calls that function.
+ *
+ * r3 and r11 are not included in any clobbers list because they are always
+ * listed as output registers.
+ *
+ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
+ * whether they will be clobbered.
+ *
+ * Note that r11 can be used as an output parameter.
+ *
+ * The "memory" clobber is only necessary for hcalls where the Hypervisor
+ * will read or write guest memory. However, we add it to all hcalls because
+ * the impact is minimal, and we want to ensure that it's present for the
+ * hcalls that need it.
+*/
+
+/* List of common clobbered registers.  Do not use this macro. */
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
+
+#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
+#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
+#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
+#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
+#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
+#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
+#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
+#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
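+
+/*
+ * For example, a hypercall passing two register parameters besides r11
+ * (such as ev_int_set_mask() below, which uses r3 and r4) pairs with
+ * EV_HCALL_CLOBBERS2.
+ */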
+
+extern bool epapr_paravirt_enabled;
+extern u32 epapr_hypercall_start[];
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only.  Registers that are
+ * initialized before making the hypercall are input/output.  All
+ * input/output registers are represented with "+r".  Output-only registers
+ * are represented with "=r".  Do not specify any unused registers.  The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * ev_int_set_config - configure the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: configuration for this interrupt
+ * @priority: interrupt priority
+ * @destination: destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_config(unsigned int interrupt,
+	uint32_t config, unsigned int priority, uint32_t destination)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
+	r3  = interrupt;
+	r4  = config;
+	r5  = priority;
+	r6  = destination;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
+		: : EV_HCALL_CLOBBERS4
+	);
+
+	return r3;
+}
+
+/**
+ * ev_int_get_config - return the config of the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: returned configuration for this interrupt
+ * @priority: returned interrupt priority
+ * @destination: returned destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_config(unsigned int interrupt,
+	uint32_t *config, unsigned int *priority, uint32_t *destination)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
+	r3 = interrupt;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
+		: : EV_HCALL_CLOBBERS4
+	);
+
+	*config = r4;
+	*priority = r5;
+	*destination = r6;
+
+	return r3;
+}
+
+/**
+ * ev_int_set_mask - sets the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: 0=enable interrupts, 1=disable interrupts
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_mask(unsigned int interrupt,
+	unsigned int mask)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
+	r3 = interrupt;
+	r4 = mask;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	return r3;
+}
+
+/**
+ * ev_int_get_mask - returns the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_mask(unsigned int interrupt,
+	unsigned int *mask)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
+	r3 = interrupt;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	*mask = r4;
+
+	return r3;
+}
+
+/**
+ * ev_int_eoi - signal the end of interrupt processing
+ * @interrupt: the interrupt number
+ *
+ * This function signals the end of processing for the specified
+ * interrupt, which must be the interrupt currently in service. By
+ * definition, this is also the highest-priority interrupt.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_eoi(unsigned int interrupt)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_EOI);
+	r3 = interrupt;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/**
+ * ev_byte_channel_send - send characters to a byte stream
+ * @handle: byte stream handle
+ * @count: (input) num of chars to send, (output) num chars sent
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * @buffer must be at least 16 bytes long, because all 16 bytes will be
+ * read from memory into registers, even if count < 16.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_send(unsigned int handle,
+	unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r7 __asm__("r7");
+	register uintptr_t r8 __asm__("r8");
+	const uint32_t *p = (const uint32_t *) buffer;
+
+	r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
+	r3 = handle;
+	r4 = *count;
+	r5 = be32_to_cpu(p[0]);
+	r6 = be32_to_cpu(p[1]);
+	r7 = be32_to_cpu(p[2]);
+	r8 = be32_to_cpu(p[3]);
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3),
+		  "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
+		: : EV_HCALL_CLOBBERS6
+	);
+
+	*count = r4;
+
+	return r3;
+}
+
+/**
+ * ev_byte_channel_receive - fetch characters from a byte channel
+ * @handle: byte channel handle
+ * @count: (input) max num of chars to receive, (output) num chars received
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * The size of @buffer must be at least 16 bytes, even if you request fewer
+ * than 16 characters, because we always write 16 bytes to @buffer.  This is
+ * for performance reasons.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_receive(unsigned int handle,
+	unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r7 __asm__("r7");
+	register uintptr_t r8 __asm__("r8");
+	uint32_t *p = (uint32_t *) buffer;
+
+	r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
+	r3 = handle;
+	r4 = *count;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4),
+		  "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
+		: : EV_HCALL_CLOBBERS6
+	);
+
+	*count = r4;
+	p[0] = cpu_to_be32(r5);
+	p[1] = cpu_to_be32(r6);
+	p[2] = cpu_to_be32(r7);
+	p[3] = cpu_to_be32(r8);
+
+	return r3;
+}
+
+/**
+ * ev_byte_channel_poll - returns the status of the byte channel buffers
+ * @handle: byte channel handle
+ * @rx_count: returned count of bytes in receive queue
+ * @tx_count: returned count of free space in transmit queue
+ *
+ * This function reports the amount of data in the receive queue (i.e. the
+ * number of bytes you can read), and the amount of free space in the transmit
+ * queue (i.e. the number of bytes you can write).
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_poll(unsigned int handle,
+	unsigned int *rx_count,	unsigned int *tx_count)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+
+	r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
+	r3 = handle;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
+		: : EV_HCALL_CLOBBERS3
+	);
+
+	*rx_count = r4;
+	*tx_count = r5;
+
+	return r3;
+}
+
+/**
+ * ev_int_iack - acknowledge an interrupt
+ * @handle: handle to the target interrupt controller
+ * @vector: returned interrupt vector
+ *
+ * If handle is zero, the function returns the next interrupt source
+ * number to be handled irrespective of the hierarchy or cascading
+ * of interrupt controllers. If non-zero, specifies a handle to the
+ * interrupt controller that is the target of the acknowledge.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_iack(unsigned int handle,
+	unsigned int *vector)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = EV_HCALL_TOKEN(EV_INT_IACK);
+	r3 = handle;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	*vector = r4;
+
+	return r3;
+}
+
+/**
+ * ev_doorbell_send - send a doorbell to another partition
+ * @handle: doorbell send handle
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_doorbell_send(unsigned int handle)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
+	r3 = handle;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/**
+ * ev_idle -- wait for next interrupt on this core
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_idle(void)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = EV_HCALL_TOKEN(EV_IDLE);
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "=r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+			    unsigned long *out,
+			    unsigned long nr)
+{
+	register unsigned long r0 asm("r0");
+	register unsigned long r3 asm("r3") = in[0];
+	register unsigned long r4 asm("r4") = in[1];
+	register unsigned long r5 asm("r5") = in[2];
+	register unsigned long r6 asm("r6") = in[3];
+	register unsigned long r7 asm("r7") = in[4];
+	register unsigned long r8 asm("r8") = in[5];
+	register unsigned long r9 asm("r9") = in[6];
+	register unsigned long r10 asm("r10") = in[7];
+	register unsigned long r11 asm("r11") = nr;
+	register unsigned long r12 asm("r12");
+
+	asm volatile("bl	epapr_hypercall_start"
+		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+		       "=r"(r12)
+		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+		       "r"(r9), "r"(r10), "r"(r11)
+		     : "memory", "cc", "xer", "ctr", "lr");
+
+	out[0] = r4;
+	out[1] = r5;
+	out[2] = r6;
+	out[3] = r7;
+	out[4] = r8;
+	out[5] = r9;
+	out[6] = r10;
+	out[7] = r11;
+
+	return r3;
+}
+#else
+static inline unsigned long epapr_hypercall(unsigned long *in,
+				   unsigned long *out,
+				   unsigned long nr)
+{
+	return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+	unsigned long r;
+
+	r = epapr_hypercall(in, out, nr);
+	*r2 = out[0];
+
+	return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+
+	in[0] = p1;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+				    unsigned long p2)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+				    unsigned long p2, unsigned long p3)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	in[2] = p3;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+				    unsigned long p2, unsigned long p3,
+				    unsigned long p4)
+{
+	unsigned long in[8] = {0};
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	in[2] = p3;
+	in[3] = p4;
+	return epapr_hypercall(in, out, nr);
+}
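+
+/*
+ * Illustrative use (a sketch; error handling is the caller's choice):
+ * the fixed-arity wrappers marshal arguments into the in[]/out[] arrays,
+ * e.g. ringing a doorbell by its send handle:
+ *
+ *	long ret = epapr_hypercall1(EV_HCALL_TOKEN(EV_DOORBELL_SEND), handle);
+ *
+ *	if (ret != EV_SUCCESS)
+ *		pr_warn("doorbell hcall failed: %ld\n", ret);
+ */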
+#endif /* !__ASSEMBLY__ */
+#endif /* _EPAPR_HCALLS_H */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
new file mode 100644
index 0000000..555e22d
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -0,0 +1,219 @@
+/*
+ *  Definitions for use by exception code on Book3-E
+ *
+ *  Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_EXCEPTION_64E_H
+#define _ASM_POWERPC_EXCEPTION_64E_H
+
+/*
+ * SPRG usage and other considerations...
+ *
+ * Since TLB miss and other standard exceptions can be interrupted by
+ * critical exceptions which can themselves be interrupted by machine
+ * checks, and since the two latter can themselves cause a TLB miss when
+ * hitting the linear mapping for the kernel stacks, we need to be a bit
+ * creative in how we use SPRGs.
+ *
+ * The basic idea is that we have one SPRG reserved for critical and one
+ * for machine check interrupts. Those are used to save a GPR that can
+ * then be used to get the PACA, and to store as much context as we need
+ * to save in there. That includes saving the SPRGs used by the TLB miss
+ * handler for linear mapping misses and the associated SRR0/1 due to
+ * the above re-entrancy issue.
+ *
+ * So here's the current usage pattern. It's done regardless of which
+ * SPRGs are user-readable though; thus we might have to change some of
+ * this later. In order to do that more easily, we use special constants
+ * for naming them.
+ *
+ * WARNING: Some of these SPRGs are user readable. We need to do something
+ * about that at some point by making sure they can't be used to leak
+ * kernel critical data.
+ */
+
+#define PACA_EXGDBELL PACA_EXGEN
+
+/* We are out of SPRGs so we save some things in the PACA. The normal
+ * exception frame is smaller than the CRIT or MC one though
+ */
+#define EX_R1		(0 * 8)
+#define EX_CR		(1 * 8)
+#define EX_R10		(2 * 8)
+#define EX_R11		(3 * 8)
+#define EX_R14		(4 * 8)
+#define EX_R15		(5 * 8)
+
+/*
+ * The TLB miss exception uses different slots.
+ *
+ * The bolted variant uses only the first six fields,
+ * which in combination with pgd and kernel_pgd fit in
+ * one 64-byte cache line.
+ */
+
+#define EX_TLB_R10	( 0 * 8)
+#define EX_TLB_R11	( 1 * 8)
+#define EX_TLB_R14	( 2 * 8)
+#define EX_TLB_R15	( 3 * 8)
+#define EX_TLB_R16	( 4 * 8)
+#define EX_TLB_CR	( 5 * 8)
+#define EX_TLB_R12	( 6 * 8)
+#define EX_TLB_R13	( 7 * 8)
+#define EX_TLB_DEAR	( 8 * 8) /* Level 0 and 2 only */
+#define EX_TLB_ESR	( 9 * 8) /* Level 0 and 2 only */
+#define EX_TLB_SRR0	(10 * 8)
+#define EX_TLB_SRR1	(11 * 8)
+#define EX_TLB_R7	(12 * 8)
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define EX_TLB_R8	(13 * 8)
+#define EX_TLB_R9	(14 * 8)
+#define EX_TLB_LR	(15 * 8)
+#define EX_TLB_SIZE	(16 * 8)
+#else
+#define EX_TLB_SIZE	(13 * 8)
+#endif
+
+#define	START_EXCEPTION(label)						\
+	.globl exc_##label##_book3e;					\
+exc_##label##_book3e:
+
+/* TLB miss exception prolog
+ *
+ * This prolog handles re-entrancy (up to 3 levels supported in the PACA
+ * though we currently don't test for overflow). It provides you with a
+ * re-entrancy safe working space of r10...r16 and CR with r12 being used
+ * as the exception area pointer in the PACA for that level of re-entrancy
+ * and r13 containing the PACA pointer.
+ *
+ * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply
+ * as-is for instruction exceptions. It's up to the actual exception code
+ * to save them as well if required.
+ */
+#define TLB_MISS_PROLOG							    \
+	mtspr	SPRN_SPRG_TLB_SCRATCH,r12;				    \
+	mfspr	r12,SPRN_SPRG_TLB_EXFRAME;				    \
+	std	r10,EX_TLB_R10(r12);					    \
+	mfcr	r10;							    \
+	std	r11,EX_TLB_R11(r12);					    \
+	mfspr	r11,SPRN_SPRG_TLB_SCRATCH;				    \
+	std	r13,EX_TLB_R13(r12);					    \
+	mfspr	r13,SPRN_SPRG_PACA;					    \
+	std	r14,EX_TLB_R14(r12);					    \
+	addi	r14,r12,EX_TLB_SIZE;					    \
+	std	r15,EX_TLB_R15(r12);					    \
+	mfspr	r15,SPRN_SRR1;						    \
+	std	r16,EX_TLB_R16(r12);					    \
+	mfspr	r16,SPRN_SRR0;						    \
+	std	r10,EX_TLB_CR(r12);					    \
+	std	r11,EX_TLB_R12(r12);					    \
+	mtspr	SPRN_SPRG_TLB_EXFRAME,r14;				    \
+	std	r15,EX_TLB_SRR1(r12);					    \
+	std	r16,EX_TLB_SRR0(r12);					    \
+	TLB_MISS_PROLOG_STATS
+
+/* And these are the matching epilogs that restore things
+ *
+ * There are 3 epilogs:
+ *
+ * - SUCCESS       : Unwinds one level
+ * - ERROR         : restore from level 0 and reset
+ * - ERROR_SPECIAL : restore from current level and reset
+ *
+ * Normal errors use ERROR, that is, they restore the initial fault context
+ * and trigger a fault. However, there is a special case for linear mapping
+ * errors. Those should basically never happen, but if they do happen, we
+ * want the error to point out the context that did that linear mapping
+ * fault, not the initial level 0 (basically, we got a bogus PGF or something
+ * like that). For userland errors on the linear mapping, there is no
+ * difference since those are always level 0 anyway
+ */
+
+#define TLB_MISS_RESTORE(freg)						    \
+	ld	r14,EX_TLB_CR(r12);					    \
+	ld	r10,EX_TLB_R10(r12);					    \
+	ld	r15,EX_TLB_SRR0(r12);					    \
+	ld	r16,EX_TLB_SRR1(r12);					    \
+	mtspr	SPRN_SPRG_TLB_EXFRAME,freg;				    \
+	ld	r11,EX_TLB_R11(r12);					    \
+	mtcr	r14;							    \
+	ld	r13,EX_TLB_R13(r12);					    \
+	ld	r14,EX_TLB_R14(r12);					    \
+	mtspr	SPRN_SRR0,r15;						    \
+	ld	r15,EX_TLB_R15(r12);					    \
+	mtspr	SPRN_SRR1,r16;						    \
+	TLB_MISS_RESTORE_STATS						    \
+	ld	r16,EX_TLB_R16(r12);					    \
+	ld	r12,EX_TLB_R12(r12);					    \
+
+#define TLB_MISS_EPILOG_SUCCESS						    \
+	TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR						    \
+	addi	r12,r13,PACA_EXTLB;					    \
+	TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR_SPECIAL					    \
+	addi	r11,r13,PACA_EXTLB;					    \
+	TLB_MISS_RESTORE(r11)
+
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define TLB_MISS_PROLOG_STATS						    \
+	mflr	r10;							    \
+	std	r8,EX_TLB_R8(r12);					    \
+	std	r9,EX_TLB_R9(r12);					    \
+	std	r10,EX_TLB_LR(r12);
+#define TLB_MISS_RESTORE_STATS					            \
+	ld	r16,EX_TLB_LR(r12);					    \
+	ld	r9,EX_TLB_R9(r12);					    \
+	ld	r8,EX_TLB_R8(r12);					    \
+	mtlr	r16;
+#define TLB_MISS_STATS_D(name)						    \
+	addi	r9,r13,MMSTAT_DSTATS+name;				    \
+	bl	tlb_stat_inc;
+#define TLB_MISS_STATS_I(name)						    \
+	addi	r9,r13,MMSTAT_ISTATS+name;				    \
+	bl	tlb_stat_inc;
+#define TLB_MISS_STATS_X(name)						    \
+	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);				    \
+	cmpdi	cr2,r8,-1;						    \
+	beq	cr2,61f;						    \
+	addi	r9,r13,MMSTAT_DSTATS+name;				    \
+	b	62f;							    \
+61:	addi	r9,r13,MMSTAT_ISTATS+name;				    \
+62:	bl	tlb_stat_inc;
+#define TLB_MISS_STATS_SAVE_INFO					    \
+	std	r14,EX_TLB_ESR(r12);	/* save ESR */
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED					    \
+	std	r14,PACA_EXTLB+EX_TLB_ESR(r13);	/* save ESR */
+#else
+#define TLB_MISS_PROLOG_STATS
+#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_PROLOG_STATS_BOLTED
+#define TLB_MISS_RESTORE_STATS_BOLTED
+#define TLB_MISS_STATS_D(name)
+#define TLB_MISS_STATS_I(name)
+#define TLB_MISS_STATS_X(name)
+#define TLB_MISS_STATS_Y(name)
+#define TLB_MISS_STATS_SAVE_INFO
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED
+#endif
+
+#define SET_IVOR(vector_number, vector_offset)	\
+	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+	ori	r3,r3,vector_offset@l;		\
+	mtspr	SPRN_IVOR##vector_number,r3;
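+
+/*
+ * Mechanically (a sketch, not a real vector assignment): SET_IVOR(0, 0x020)
+ * points SPRN_IVOR0 at interrupt_base_book3e + 0x20; the ori of the low
+ * offset assumes interrupt_base_book3e is suitably aligned. The actual
+ * vector/offset pairings live in the exception .S files.
+ */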
+
+#define RFI_TO_KERNEL							\
+	rfi
+
+#define RFI_TO_USER							\
+	rfi
+
+#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
new file mode 100644
index 0000000..a86fedd
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -0,0 +1,742 @@
+#ifndef _ASM_POWERPC_EXCEPTION_H
+#define _ASM_POWERPC_EXCEPTION_H
+/*
+ * Extracted from head_64.S
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC-64 platform, including trap and interrupt dispatch.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers.  They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries and iSeries.
+ *
+ * We make as much of the exception code common between native
+ * exception handlers (including pSeries LPAR) and iSeries LPAR
+ * implementations as possible.
+ */
+#include <asm/head-64.h>
+#include <asm/feature-fixups.h>
+
+/* PACA save area offsets (exgen, exmc, etc) */
+#define EX_R9		0
+#define EX_R10		8
+#define EX_R11		16
+#define EX_R12		24
+#define EX_R13		32
+#define EX_DAR		40
+#define EX_DSISR	48
+#define EX_CCR		52
+#define EX_CFAR		56
+#define EX_PPR		64
+#if defined(CONFIG_RELOCATABLE)
+#define EX_CTR		72
+#define EX_SIZE		10	/* size in u64 units */
+#else
+#define EX_SIZE		9	/* size in u64 units */
+#endif
+
+/*
+ * maximum recursive depth of MCE exceptions
+ */
+#define MAX_MCE_DEPTH	4
+
+/*
+ * EX_LR is only used in EXSLB, where it does not overlap with EX_DAR.
+ * EX_CCR could similarly share space with EX_DSISR, but as these are
+ * 4-byte registers there is a hole in the save area, so overlapping them
+ * is not necessary. The hole could be used for future savings if another
+ * 4-byte register needed to be saved.
+ */
+#define EX_LR		EX_DAR
+
+/*
+ * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
+ * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
+ * with EX_DAR.
+ */
+#define EX_R3		EX_DAR
+
+#define STF_ENTRY_BARRIER_SLOT						\
+	STF_ENTRY_BARRIER_FIXUP_SECTION;				\
+	nop;								\
+	nop;								\
+	nop
+
+#define STF_EXIT_BARRIER_SLOT						\
+	STF_EXIT_BARRIER_FIXUP_SECTION;					\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop
+
+/*
+ * r10 must be free to use, r13 must be paca
+ */
+#define INTERRUPT_TO_KERNEL						\
+	STF_ENTRY_BARRIER_SLOT
+
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ */
+#define RFI_FLUSH_SLOT							\
+	RFI_FLUSH_FIXUP_SECTION;					\
+	nop;								\
+	nop;								\
+	nop
+
+#define RFI_TO_KERNEL							\
+	rfid
+
+#define RFI_TO_USER							\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL						\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define RFI_TO_GUEST							\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define HRFI_TO_KERNEL							\
+	hrfid
+
+#define HRFI_TO_USER							\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL						\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_GUEST							\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN							\
+	STF_EXIT_BARRIER_SLOT;						\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#ifdef CONFIG_RELOCATABLE
+#define __EXCEPTION_PROLOG_2_RELON(label, h)				\
+	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
+	LOAD_HANDLER(r12,label);					\
+	mtctr	r12;							\
+	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
+	li	r10,MSR_RI;						\
+	mtmsrd 	r10,1;			/* Set RI (EE=0) */		\
+	bctr;
+#else
+/* If not relocatable, we can jump directly -- and save messing with LR */
+#define __EXCEPTION_PROLOG_2_RELON(label, h)				\
+	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
+	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
+	li	r10,MSR_RI;						\
+	mtmsrd 	r10,1;			/* Set RI (EE=0) */		\
+	b	label;
+#endif
+#define EXCEPTION_PROLOG_2_RELON(label, h)				\
+	__EXCEPTION_PROLOG_2_RELON(label, h)
+
+/*
+ * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
+ * rfid. In the CONFIG_RELOCATABLE case, EXCEPTION_PROLOG_2_RELON reaches the
+ * handler via CTR, which is why CTR is saved/restored with SAVE_CTR/RESTORE_CTR.
+ */
+#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec)		\
+	SET_SCRATCH0(r13);		/* save r13 */			\
+	EXCEPTION_PROLOG_0(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_2_RELON(label, h)
+
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
+ * Instead we get the base of the kernel from paca->kernelbase and or in the low
+ * part of label. This requires that the label be within 64KB of kernelbase, and
+ * that kernelbase be 64K aligned.
+ */
+#define LOAD_HANDLER(reg, label)					\
+	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
+
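+/*
+ * A worked example: assuming kernelbase is 0xc000000000000000 and "label"
+ * is linked 0x4e00 bytes above it, LOAD_HANDLER yields 0xc000000000004e00
+ * using just an ld and an ori; hence the requirement that the label sit
+ * within 64KB of a 64K-aligned kernelbase.
+ */
+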
+#define __LOAD_HANDLER(reg, label)					\
+	ld	reg,PACAKBASE(r13);					\
+	ori	reg,reg,(ABS_ADDR(label))@l;
+
+/*
+ * Branches from unrelocated code (e.g., interrupts) to labels outside
+ * head-y require >64K offsets.
+ */
+#define __LOAD_FAR_HANDLER(reg, label)					\
+	ld	reg,PACAKBASE(r13);					\
+	ori	reg,reg,(ABS_ADDR(label))@l;				\
+	addis	reg,reg,(ABS_ADDR(label))@h;
+
+/* Exception register prefixes */
+#define EXC_HV	H
+#define EXC_STD
+
+#if defined(CONFIG_RELOCATABLE)
+/*
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler.  So, save/restore it
+ * when required.
+ */
+#define SAVE_CTR(reg, area)	mfctr	reg ; 	std	reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area) 			ld	reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
+#else
+/* ...else CTR is not clobbered, so it simply stays in the register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area) 	mfctr	reg
+#define RESTORE_CTR(reg, area)
+#endif
+
+/*
+ * PPR save/restore macros used in exceptions-64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra, rb)						\
+BEGIN_FTR_SECTION_NESTED(940)						\
+	ld	ra,PACACURRENT(r13);					\
+	ld	rb,area+EX_PPR(r13);	/* Read PPR from paca */	\
+	std	rb,TASKTHREADPPR(ra);					\
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra)					\
+BEGIN_FTR_SECTION_NESTED(941)						\
+	ld	ra,area+EX_PPR(r13);					\
+	mtspr	SPRN_PPR,ra;						\
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr)					\
+BEGIN_FTR_SECTION_NESTED(943)						\
+	mfspr	ra,spr;							\
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Set an SPR from a register if the CPU has the given feature
+ */
+#define OPT_SET_SPR(ra, spr, ftr)					\
+BEGIN_FTR_SECTION_NESTED(943)						\
+	mtspr	spr,ra;							\
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
+BEGIN_FTR_SECTION_NESTED(943)						\
+	std	ra,offset(r13);						\
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+#define EXCEPTION_PROLOG_0(area)					\
+	GET_PACA(r13);							\
+	std	r9,area+EX_R9(r13);	/* save r9 */			\
+	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
+	HMT_MEDIUM;							\
+	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
+	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+
+#define __EXCEPTION_PROLOG_1_PRE(area)					\
+	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
+	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
+	INTERRUPT_TO_KERNEL;						\
+	SAVE_CTR(r10, area);						\
+	mfcr	r9;
+
+#define __EXCEPTION_PROLOG_1_POST(area)					\
+	std	r11,area+EX_R11(r13);					\
+	std	r12,area+EX_R12(r13);					\
+	GET_SCRATCH0(r10);						\
+	std	r10,area+EX_R13(r13)
+
+/*
+ * This version of EXCEPTION_PROLOG_1 carries an additional
+ * parameter called "bitmask" to support checking of the
+ * interrupt maskable level in SOFTEN_TEST.
+ * Intended to be used in the MASKABLE_EXCEPTION_* macros.
+ */
+#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask)			\
+	__EXCEPTION_PROLOG_1_PRE(area);					\
+	extra(vec, bitmask);						\
+	__EXCEPTION_PROLOG_1_POST(area);
+
+/*
+ * This version of the EXCEPTION_PROLOG_1 is intended
+ * to be used in STD_EXCEPTION* macros
+ */
+#define _EXCEPTION_PROLOG_1(area, extra, vec)				\
+	__EXCEPTION_PROLOG_1_PRE(area);					\
+	extra(vec);							\
+	__EXCEPTION_PROLOG_1_POST(area);
+
+#define EXCEPTION_PROLOG_1(area, extra, vec)				\
+	_EXCEPTION_PROLOG_1(area, extra, vec)
+
+#define __EXCEPTION_PROLOG_2(label, h)					\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
+	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
+	LOAD_HANDLER(r12,label)						\
+	mtspr	SPRN_##h##SRR0,r12;					\
+	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
+	mtspr	SPRN_##h##SRR1,r10;					\
+	h##RFI_TO_KERNEL;						\
+	b	.	/* prevent speculative execution */
+#define EXCEPTION_PROLOG_2(label, h)					\
+	__EXCEPTION_PROLOG_2(label, h)
+
+/* _NORI variant keeps MSR_RI clear */
+#define __EXCEPTION_PROLOG_2_NORI(label, h)				\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
+	xori	r10,r10,MSR_RI;		/* Clear MSR_RI */		\
+	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
+	LOAD_HANDLER(r12,label)						\
+	mtspr	SPRN_##h##SRR0,r12;					\
+	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
+	mtspr	SPRN_##h##SRR1,r10;					\
+	h##RFI_TO_KERNEL;						\
+	b	.	/* prevent speculative execution */
+
+#define EXCEPTION_PROLOG_2_NORI(label, h)				\
+	__EXCEPTION_PROLOG_2_NORI(label, h)
+
+#define EXCEPTION_PROLOG(area, label, h, extra, vec)			\
+	SET_SCRATCH0(r13);		/* save r13 */			\
+	EXCEPTION_PROLOG_0(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_2(label, h);
+
+#define __KVMTEST(h, n)							\
+	lbz	r10,HSTATE_IN_GUEST(r13);				\
+	cmpwi	r10,0;							\
+	bne	do_kvm_##h##n
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come in to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
+/*
+ * Branch to label using its 0xC000 address. This results in an instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * loading KBASE, for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label)					\
+	__LOAD_HANDLER(reg, label);					\
+	mtctr	reg;							\
+	bctr
+
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label)					\
+	__LOAD_HANDLER(reg, label);					\
+	mtctr	reg;							\
+	bctr
+
+#define BRANCH_LINK_TO_FAR(label)					\
+	__LOAD_FAR_HANDLER(r12, label);					\
+	mtctr	r12;							\
+	bctrl
+
+/*
+ * KVM requires __LOAD_FAR_HANDLER.
+ *
+ * __BRANCH_TO_KVM_EXIT branches are also a special case because they
+ * explicitly use r9 then reload it from PACA before branching. Hence
+ * the double-underscore.
+ */
+#define __BRANCH_TO_KVM_EXIT(area, label)				\
+	mfctr	r9;							\
+	std	r9,HSTATE_SCRATCH1(r13);				\
+	__LOAD_FAR_HANDLER(r9, label);					\
+	mtctr	r9;							\
+	ld	r9,area+EX_R9(r13);					\
+	bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label)					\
+	b	label
+
+#define BRANCH_LINK_TO_FAR(label)					\
+	bl	label
+
+#define __BRANCH_TO_KVM_EXIT(area, label)				\
+	ld	r9,area+EX_R9(r13);					\
+	b	label
+
+#endif
+
+/* Do not enable RI */
+#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec)		\
+	EXCEPTION_PROLOG_0(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_2_NORI(label, h);
+
+
+#define __KVM_HANDLER(area, h, n)					\
+	BEGIN_FTR_SECTION_NESTED(947)					\
+	ld	r10,area+EX_CFAR(r13);					\
+	std	r10,HSTATE_CFAR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
+	ld	r10,area+EX_R10(r13);					\
+	std	r12,HSTATE_SCRATCH0(r13);				\
+	sldi	r12,r9,32;						\
+	ori	r12,r12,(n);						\
+	/* This reloads r9 before branching to kvmppc_interrupt */	\
+	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
+
+#define __KVM_HANDLER_SKIP(area, h, n)					\
+	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
+	beq	89f;							\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
+	ld	r10,area+EX_R10(r13);					\
+	std	r12,HSTATE_SCRATCH0(r13);				\
+	sldi	r12,r9,32;						\
+	ori	r12,r12,(n);						\
+	/* This reloads r9 before branching to kvmppc_interrupt */	\
+	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt);			\
+89:	mtocrf	0x80,r9;						\
+	ld	r9,area+EX_R9(r13);					\
+	ld	r10,area+EX_R10(r13);					\
+	b	kvmppc_skip_##h##interrupt
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define KVMTEST(h, n)			__KVMTEST(h, n)
+#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST(h, n)
+#define KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)
+#endif
+
+#define NOTEST(n)
+
+#define EXCEPTION_PROLOG_COMMON_1()					   \
+	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
+	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
+	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
+	std	r10,0(r1);		/* make stack chain pointer	*/ \
+	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
+	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
+
+
+/*
+ * The common exception prolog is used for all except a few exceptions
+ * such as a segment miss on a kernel address.  We have to be prepared
+ * to take another exception from the point where we first touch the
+ * kernel stack onwards.
+ *
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ */
+#define EXCEPTION_PROLOG_COMMON(n, area)				   \
+	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
+	mr	r10,r1;			/* Save r1			*/ \
+	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
+	beq-	1f;							   \
+	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
+1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
+	blt+	cr1,3f;			/* abort if it is		*/ \
+	li	r1,(n);			/* will be reloaded later	*/ \
+	sth	r1,PACA_TRAP_SAVE(r13);					   \
+	std	r3,area+EX_R3(r13);					   \
+	addi	r3,r13,area;		/* r3 -> where regs are saved*/	   \
+	RESTORE_CTR(r1, area);						   \
+	b	bad_stack;						   \
+3:	EXCEPTION_PROLOG_COMMON_1();					   \
+	beq	4f;			/* if from kernel mode		*/ \
+	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
+	SAVE_PPR(area, r9, r10);					   \
+4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
+	EXCEPTION_PROLOG_COMMON_3(n)					   \
+	ACCOUNT_STOLEN_TIME
+
+/* Save the original register values from the save area to the stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area)					   \
+	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
+	ld	r10,area+EX_R10(r13);					   \
+	std	r9,GPR9(r1);						   \
+	std	r10,GPR10(r1);						   \
+	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
+	ld	r10,area+EX_R12(r13);					   \
+	ld	r11,area+EX_R13(r13);					   \
+	std	r9,GPR11(r1);						   \
+	std	r10,GPR12(r1);						   \
+	std	r11,GPR13(r1);						   \
+	BEGIN_FTR_SECTION_NESTED(66);					   \
+	ld	r10,area+EX_CFAR(r13);					   \
+	std	r10,ORIG_GPR3(r1);					   \
+	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);		   \
+	GET_CTR(r10, area);						   \
+	std	r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(n)					   \
+	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
+	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe   */ \
+	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
+	mflr	r9;			/* Get LR, later save to stack	*/ \
+	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
+	std	r9,_LINK(r1);						   \
+	lbz	r10,PACAIRQSOFTMASK(r13);				   \
+	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
+	std	r10,SOFTE(r1);						   \
+	std	r11,_XER(r1);						   \
+	li	r9,(n)+1;						   \
+	std	r9,_TRAP(r1);		/* set trap number		*/ \
+	li	r10,0;							   \
+	ld	r11,exception_marker@toc(r2);				   \
+	std	r10,RESULT(r1);		/* clear regs->result		*/ \
+	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
+
+/*
+ * Exception vectors.
+ */
+#define STD_EXCEPTION(vec, label)				\
+	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
+
+/* Version of above for when we have to branch out-of-line */
+#define __OOL_EXCEPTION(vec, label, hdlr)			\
+	SET_SCRATCH0(r13)					\
+	EXCEPTION_PROLOG_0(PACA_EXGEN)				\
+	b hdlr;
+
+#define STD_EXCEPTION_OOL(vec, label)				\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
+	EXCEPTION_PROLOG_2(label, EXC_STD)
+
+#define STD_EXCEPTION_HV(loc, vec, label)			\
+	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
+
+#define STD_EXCEPTION_HV_OOL(vec, label)			\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
+	EXCEPTION_PROLOG_2(label, EXC_HV)
+
+#define STD_RELON_EXCEPTION(loc, vec, label)		\
+	/* No guest interrupts come through here */	\
+	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
+
+#define STD_RELON_EXCEPTION_OOL(vec, label)			\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
+	EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
+
+#define STD_RELON_EXCEPTION_HV(loc, vec, label)		\
+	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
+
+#define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);	\
+	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
+
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x980	PACA_IRQ_DEC
+#define SOFTEN_VALUE_0xa00	PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe60	PACA_IRQ_HMI
+#define SOFTEN_VALUE_0xea0	PACA_IRQ_EE
+#define SOFTEN_VALUE_0xf00	PACA_IRQ_PMI
+
+#define __SOFTEN_TEST(h, vec, bitmask)					\
+	lbz	r10,PACAIRQSOFTMASK(r13);				\
+	andi.	r10,r10,bitmask;					\
+	li	r10,SOFTEN_VALUE_##vec;					\
+	bne	masked_##h##interrupt
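+
+/*
+ * Illustrative note: if the interrupt's mask class bit is currently set in
+ * PACAIRQSOFTMASK, the test above branches to masked_(h)interrupt with the
+ * vector's SOFTEN_VALUE_* in r10, so the masked handler can record the
+ * event in paca->irq_happened and replay it once interrupts are
+ * soft-enabled again.
+ */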
+
+#define _SOFTEN_TEST(h, vec, bitmask)	__SOFTEN_TEST(h, vec, bitmask)
+
+#define SOFTEN_TEST_PR(vec, bitmask)					\
+	KVMTEST(EXC_STD, vec);						\
+	_SOFTEN_TEST(EXC_STD, vec, bitmask)
+
+#define SOFTEN_TEST_HV(vec, bitmask)					\
+	KVMTEST(EXC_HV, vec);						\
+	_SOFTEN_TEST(EXC_HV, vec, bitmask)
+
+#define KVMTEST_PR(vec)							\
+	KVMTEST(EXC_STD, vec)
+
+#define KVMTEST_HV(vec)							\
+	KVMTEST(EXC_HV, vec)
+
+#define SOFTEN_NOTEST_PR(vec, bitmask)	_SOFTEN_TEST(EXC_STD, vec, bitmask)
+#define SOFTEN_NOTEST_HV(vec, bitmask)	_SOFTEN_TEST(EXC_HV, vec, bitmask)
+
+#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask)		\
+	SET_SCRATCH0(r13);    /* save r13 */				\
+	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
+	EXCEPTION_PROLOG_2(label, h);
+
+#define MASKABLE_EXCEPTION(vec, label, bitmask)				\
+	__MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
+
+#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask)			\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
+	EXCEPTION_PROLOG_2(label, EXC_STD)
+
+#define MASKABLE_EXCEPTION_HV(vec, label, bitmask)			\
+	__MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
+
+#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask)			\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
+	EXCEPTION_PROLOG_2(label, EXC_HV)
+
+#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask)	\
+	SET_SCRATCH0(r13);    /* save r13 */				\
+	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
+	EXCEPTION_PROLOG_2_RELON(label, h)
+
+#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask)			\
+	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+
+#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask)		\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
+	EXCEPTION_PROLOG_2(label, EXC_STD);
+
+#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask)		\
+	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
+
+#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
+	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
+
+/*
+ * Our exception common code can be passed various "additions"
+ * to specify the behaviour of interrupts, whether to kick the
+ * runlatch, etc...
+ */
+
+/*
+ * This addition reconciles our actual IRQ state with the various software
+ * flags that track it. This may call C code.
+ */
+#define ADD_RECONCILE	RECONCILE_IRQ_STATE(r10,r11)
+
+#define ADD_NVGPRS				\
+	bl	save_nvgprs
+
+#define RUNLATCH_ON				\
+BEGIN_FTR_SECTION				\
+	CURRENT_THREAD_INFO(r3, r1);		\
+	ld	r4,TI_LOCAL_FLAGS(r3);		\
+	andi.	r0,r4,_TLF_RUNLATCH;		\
+	beql	ppc64_runlatch_on_trampoline;	\
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
+	EXCEPTION_PROLOG_COMMON(trap, area);			\
+	/* Volatile regs are potentially clobbered here */	\
+	additions;						\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+	bl	hdlr;						\
+	b	ret
+
+/*
+ * Exception where stack is already set in r1, r1 is saved in r10, and it
+ * continues rather than returns.
+ */
+#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
+	EXCEPTION_PROLOG_COMMON_1();				\
+	EXCEPTION_PROLOG_COMMON_2(area);			\
+	EXCEPTION_PROLOG_COMMON_3(trap);			\
+	/* Volatile regs are potentially clobbered here */	\
+	additions;						\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+	bl	hdlr
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr)			\
+	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
+		ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
+
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling
+ * (finish nap and runlatch)
+ */
+#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		\
+	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,		\
+		ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
+
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP				\
+BEGIN_FTR_SECTION				\
+	CURRENT_THREAD_INFO(r11, r1);		\
+	ld	r9,TI_LOCAL_FLAGS(r11);		\
+	andi.	r10,r9,_TLF_NAPPING;		\
+	bnel	power4_fixup_nap;		\
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+#endif	/* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
new file mode 100644
index 0000000..92cac48
--- /dev/null
+++ b/arch/powerpc/include/asm/exec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_EXEC_H
+#define _ASM_POWERPC_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_POWERPC_EXEC_H */
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
new file mode 100644
index 0000000..eb91b2d
--- /dev/null
+++ b/arch/powerpc/include/asm/extable.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path.  This means when everything is well, we don't even
+ * have to jump over them.  Further, they do not intrude on our cache or tlb
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+	int insn;
+	int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->fixup + x->fixup;
+}
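+
+/*
+ * A sketch of the matching insn-side decode (hypothetical helper, named
+ * here only for illustration; the kernel's fixup search does the
+ * equivalent arithmetic):
+ *
+ *	static inline unsigned long
+ *	extable_insn(const struct exception_table_entry *x)
+ *	{
+ *		return (unsigned long)&x->insn + x->insn;
+ *	}
+ */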
+
+#endif
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
new file mode 100644
index 0000000..1e7a335
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump.h
@@ -0,0 +1,218 @@
+/*
+ * Firmware Assisted dump header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2011 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __PPC64_FA_DUMP_H__
+#define __PPC64_FA_DUMP_H__
+
+#ifdef CONFIG_FA_DUMP
+
+/*
+ * The RMA region will be saved for later dumping when the kernel crashes.
+ * RMA is the Real Mode Area, the first block of logical memory addresses
+ * owned by the logical partition, containing the storage that may be
+ * accessed with translation off.
+ */
+#define RMA_START	0x0
+#define RMA_END		(ppc64_rma_size)
+
+/*
+ * On some Power systems where the RMO is 128MB, the kernel still requires a
+ * minimum of 256MB to boot successfully. When the kdump infrastructure is
+ * configured to save the vmcore over the network, we run into OOM issues
+ * while loading the modules related to network setup. Hence we need an
+ * additional 64MB of memory to avoid OOM issues.
+ */
+#define MIN_BOOT_MEM	(((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
+			+ (0x1UL << 26))
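+
+/*
+ * Worked example: with a 128MB RMA, RMA_END is below 0x1UL << 28 (256MB),
+ * so MIN_BOOT_MEM = (1 << 28) + (1 << 26) = 256MB + 64MB = 320MB. With a
+ * 512MB RMA it is 512MB + 64MB = 576MB.
+ */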
+
+/* The upper limit for the user-specified boot memory size: 1/4, i.e. 25% */
+#define MAX_BOOT_MEM_RATIO			4
+
+#define memblock_num_regions(memblock_type)	(memblock.memblock_type.cnt)
+
+/* Firmware provided dump sections */
+#define FADUMP_CPU_STATE_DATA	0x0001
+#define FADUMP_HPTE_REGION	0x0002
+#define FADUMP_REAL_MODE_REGION	0x0011
+
+/* Dump request flag */
+#define FADUMP_REQUEST_FLAG	0x00000001
+
+/* FADump commands */
+#define FADUMP_REGISTER		1
+#define FADUMP_UNREGISTER	2
+#define FADUMP_INVALIDATE	3
+
+/* Dump status flag */
+#define FADUMP_ERROR_FLAG	0x2000
+
+#define FADUMP_CPU_ID_MASK	((1UL << 32) - 1)
+
+#define CPU_UNKNOWN		(~((u32)0))
+
+/* Utility macros */
+#define SKIP_TO_NEXT_CPU(reg_entry)					\
+({									\
+	while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND"))	\
+		reg_entry++;						\
+	reg_entry++;							\
+})
+
+extern int crashing_cpu;
+
+/* Kernel Dump section info */
+struct fadump_section {
+	__be32	request_flag;
+	__be16	source_data_type;
+	__be16	error_flags;
+	__be64	source_address;
+	__be64	source_len;
+	__be64	bytes_dumped;
+	__be64	destination_address;
+};
+
+/* ibm,configure-kernel-dump header. */
+struct fadump_section_header {
+	__be32	dump_format_version;
+	__be16	dump_num_sections;
+	__be16	dump_status_flag;
+	__be32	offset_first_dump_section;
+
+	/* Fields for disk dump option. */
+	__be32	dd_block_size;
+	__be64	dd_block_offset;
+	__be64	dd_num_blocks;
+	__be32	dd_offset_disk_path;
+
+	/* Maximum time allowed to prevent an automatic dump-reboot. */
+	__be32	max_time_auto;
+};
+
+/*
+ * Firmware Assisted dump memory structure. This structure is required for
+ * registering future kernel dump with power firmware through rtas call.
+ *
+ * No disk dump option. Hence disk dump path string section is not included.
+ */
+struct fadump_mem_struct {
+	struct fadump_section_header	header;
+
+	/* Kernel dump sections */
+	struct fadump_section		cpu_state_data;
+	struct fadump_section		hpte_region;
+	struct fadump_section		rmr_region;
+};
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+	unsigned long	cpu_state_data_size;
+	unsigned long	hpte_region_size;
+	unsigned long	boot_memory_size;
+	unsigned long	reserve_dump_area_start;
+	unsigned long	reserve_dump_area_size;
+	/* cmd line option during boot */
+	unsigned long	reserve_bootvar;
+
+	unsigned long	fadumphdr_addr;
+	unsigned long	cpu_notes_buf;
+	unsigned long	cpu_notes_buf_size;
+
+	int		ibm_configure_kernel_dump;
+
+	unsigned long	fadump_enabled:1;
+	unsigned long	fadump_supported:1;
+	unsigned long	dump_active:1;
+	unsigned long	dump_registered:1;
+};
+
+/*
+ * Copy the ASCII values of the first 8 characters of a string into a u64
+ * variable at their respective indexes.
+ * e.g.
+ *  The string "FADMPINF" will be converted into 0x4641444d50494e46
+ */
+static inline u64 str_to_u64(const char *str)
+{
+	u64 val = 0;
+	int i;
+
+	for (i = 0; i < sizeof(val); i++)
+		val = (*str) ? (val << 8) | *str++ : val << 8;
+	return val;
+}
+#define STR_TO_HEX(x)	str_to_u64(x)
+#define REG_ID(x)	str_to_u64(x)
+
+#define FADUMP_CRASH_INFO_MAGIC		STR_TO_HEX("FADMPINF")
+#define REGSAVE_AREA_MAGIC		STR_TO_HEX("REGSAVE")
+
+/* The firmware-assisted dump format.
+ *
+ * The register save area is an area in the partition's memory used to preserve
+ * the register contents (CPU state data) for the active CPUs during a firmware
+ * assisted dump. The dump format contains a register save area header followed
+ * by register entries. Each list of registers for a CPU starts with
+ * "CPUSTRT" and ends with "CPUEND".
+ */
+
+/* Register save area header. */
+struct fadump_reg_save_area_header {
+	__be64		magic_number;
+	__be32		version;
+	__be32		num_cpu_offset;
+};
+
+/* Register entry. */
+struct fadump_reg_entry {
+	__be64		reg_id;
+	__be64		reg_value;
+};
+
+/* fadump crash info structure */
+struct fadump_crash_info_header {
+	u64		magic_number;
+	u64		elfcorehdr_addr;
+	u32		crashing_cpu;
+	struct pt_regs	regs;
+	struct cpumask	online_mask;
+};
+
+struct fad_crash_memory_ranges {
+	unsigned long long	base;
+	unsigned long long	size;
+};
+
+extern int is_fadump_boot_memory_area(u64 addr, ulong size);
+extern int early_init_dt_scan_fw_dump(unsigned long node,
+		const char *uname, int depth, void *data);
+extern int fadump_reserve_mem(void);
+extern int setup_fadump(void);
+extern int is_fadump_active(void);
+extern int should_fadump_crash(void);
+extern void crash_fadump(struct pt_regs *, const char *);
+extern void fadump_cleanup(void);
+
+#else	/* CONFIG_FA_DUMP */
+static inline int is_fadump_active(void) { return 0; }
+static inline int should_fadump_crash(void) { return 0; }
+static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h
new file mode 100644
index 0000000..6541ab7
--- /dev/null
+++ b/arch/powerpc/include/asm/fb.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
+						 vma->vm_end - vma->vm_start,
+						 vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
new file mode 100644
index 0000000..33b6f9c
--- /dev/null
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -0,0 +1,238 @@
+#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
+#define __ASM_POWERPC_FEATURE_FIXUPS_H
+
+#include <asm/asm-const.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than absolute code pointers in order to be
+ * usable with the vdso shared library. There is also an assumption
+ * that values will be negative, that is, the fixup table has to be
+ * located after the code it fixes up.
+ */
+#if defined(CONFIG_PPC64) && !defined(__powerpc64__)
+/* 64 bits kernel, 32 bits code (ie. vdso32) */
+#define FTR_ENTRY_LONG		.8byte
+#define FTR_ENTRY_OFFSET	.long 0xffffffff; .long
+#elif defined(CONFIG_PPC64)
+#define FTR_ENTRY_LONG		.8byte
+#define FTR_ENTRY_OFFSET	.8byte
+#else
+#define FTR_ENTRY_LONG		.long
+#define FTR_ENTRY_OFFSET	.long
+#endif
+
+#define START_FTR_SECTION(label)	label##1:
+
+#define FTR_SECTION_ELSE_NESTED(label)			\
+label##2:						\
+	.pushsection __ftr_alt_##label,"a";		\
+	.align 2;					\
+label##3:
+
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
+label##4:							\
+	.popsection;						\
+	.pushsection sect,"a";					\
+	.align 3;						\
+label##5:							\
+	FTR_ENTRY_LONG msk;					\
+	FTR_ENTRY_LONG val;					\
+	FTR_ENTRY_OFFSET label##1b-label##5b;			\
+	FTR_ENTRY_OFFSET label##2b-label##5b;			\
+	FTR_ENTRY_OFFSET label##3b-label##5b;			\
+	FTR_ENTRY_OFFSET label##4b-label##5b;			\
+	.ifgt (label##4b- label##3b)-(label##2b- label##1b);	\
+	.error "Feature section else case larger than body";	\
+	.endif;							\
+	.popsection;
+
+
+/* CPU feature dependent sections */
+#define BEGIN_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_FTR_SECTION		START_FTR_SECTION(97)
+
+#define END_FTR_SECTION_NESTED(msk, val, label) 		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+
+#define END_FTR_SECTION(msk, val)		\
+	END_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FTR_SECTION_NESTED_IFSET(msk, label)	\
+	END_FTR_SECTION_NESTED((msk), (msk), label)
+
+#define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
+
+/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define FTR_SECTION_ELSE	FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FTR_SECTION_END(msk, val)	\
+	ALT_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FTR_SECTION_END_IFSET(msk)	\
+	ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
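+
+/*
+ * Illustrative usage (a sketch; the instructions shown are arbitrary):
+ *
+ *	BEGIN_FTR_SECTION
+ *		mfspr	r10,SPRN_PPR
+ *	FTR_SECTION_ELSE
+ *		li	r10,0
+ *	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HAS_PPR)
+ *
+ * At boot, apply_feature_fixups() keeps the first body when the feature
+ * bit is set and patches the alternative over it when it is clear.
+ */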
+
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION			START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label) 		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val)		\
+	END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_IFSET(msk)	END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk)	END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE	MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+/* Firmware feature dependent sections */
+#define BEGIN_FW_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_FW_FTR_SECTION			START_FTR_SECTION(97)
+
+#define END_FW_FTR_SECTION_NESTED(msk, val, label) 		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+
+#define END_FW_FTR_SECTION(msk, val)		\
+	END_FW_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FW_FTR_SECTION_IFSET(msk)	END_FW_FTR_SECTION((msk), (msk))
+#define END_FW_FTR_SECTION_IFCLR(msk)	END_FW_FTR_SECTION((msk), 0)
+
+/* Firmware feature sections with alternatives */
+#define FW_FTR_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define FW_FTR_SECTION_ELSE	FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FW_FTR_SECTION_END(msk, val)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FW_FTR_SECTION_END_IFSET(msk)	\
+	ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FW_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+#ifndef __ASSEMBLY__
+
+#define ASM_FTR_IF(section_if, section_else, msk, val)	\
+	stringify_in_c(BEGIN_FTR_SECTION)			\
+	section_if "; "						\
+	stringify_in_c(FTR_SECTION_ELSE)			\
+	section_else "; "					\
+	stringify_in_c(ALT_FTR_SECTION_END((msk), (val)))
+
+#define ASM_FTR_IFSET(section_if, section_else, msk)	\
+	ASM_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_FTR_IFCLR(section_if, section_else, msk)	\
+	ASM_FTR_IF(section_if, section_else, (msk), 0)
+
+#define ASM_MMU_FTR_IF(section_if, section_else, msk, val)	\
+	stringify_in_c(BEGIN_MMU_FTR_SECTION)			\
+	section_if "; "						\
+	stringify_in_c(MMU_FTR_SECTION_ELSE)			\
+	section_else "; "					\
+	stringify_in_c(ALT_MMU_FTR_SECTION_END((msk), (val)))
+
+#define ASM_MMU_FTR_IFSET(section_if, section_else, msk)	\
+	ASM_MMU_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_MMU_FTR_IFCLR(section_if, section_else, msk)	\
+	ASM_MMU_FTR_IF(section_if, section_else, (msk), 0)
+
+#endif /* __ASSEMBLY__ */
+
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label)	label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect)		\
+label##2:						\
+	.pushsection sect,"a";				\
+	.align 2;					\
+label##3:					       	\
+	FTR_ENTRY_OFFSET label##1b-label##3b;		\
+	.popsection;
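+
+/*
+ * Assumed consumer (see asm/synch.h): the acquire barrier records each
+ * site roughly as
+ *
+ *	START_LWSYNC_SECTION(97);
+ *	isync;
+ *	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ *
+ * so boot-time fixup code can relax the isync to an lwsync on CPUs that
+ * advertise CPU_FTR_LWSYNC.
+ */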
+
+#define STF_ENTRY_BARRIER_FIXUP_SECTION			\
+953:							\
+	.pushsection __stf_entry_barrier_fixup,"a";	\
+	.align 2;					\
+954:							\
+	FTR_ENTRY_OFFSET 953b-954b;			\
+	.popsection;
+
+#define STF_EXIT_BARRIER_FIXUP_SECTION			\
+955:							\
+	.pushsection __stf_exit_barrier_fixup,"a";	\
+	.align 2;					\
+956:							\
+	FTR_ENTRY_OFFSET 955b-956b;			\
+	.popsection;
+
+#define RFI_FLUSH_FIXUP_SECTION				\
+951:							\
+	.pushsection __rfi_flush_fixup,"a";		\
+	.align 2;					\
+952:							\
+	FTR_ENTRY_OFFSET 951b-952b;			\
+	.popsection;
+
+#define NOSPEC_BARRIER_FIXUP_SECTION			\
+953:							\
+	.pushsection __barrier_nospec_fixup,"a";	\
+	.align 2;					\
+954:							\
+	FTR_ENTRY_OFFSET 953b-954b;			\
+	.popsection;
+
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long stf_barrier_fallback;
+extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+
+void apply_feature_fixups(void);
+void setup_feature_keys(void);
+#endif
+
+#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
new file mode 100644
index 0000000..7a051bd
--- /dev/null
+++ b/arch/powerpc/include/asm/firmware.h
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  Modifications for ppc64:
+ *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_POWERPC_FIRMWARE_H
+#define __ASM_POWERPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#include <asm/asm-const.h>
+
+/* firmware feature bitmask values */
+
+#define FW_FEATURE_PFT		ASM_CONST(0x0000000000000001)
+#define FW_FEATURE_TCE		ASM_CONST(0x0000000000000002)
+#define FW_FEATURE_SPRG0	ASM_CONST(0x0000000000000004)
+#define FW_FEATURE_DABR		ASM_CONST(0x0000000000000008)
+#define FW_FEATURE_COPY		ASM_CONST(0x0000000000000010)
+#define FW_FEATURE_ASR		ASM_CONST(0x0000000000000020)
+#define FW_FEATURE_DEBUG	ASM_CONST(0x0000000000000040)
+#define FW_FEATURE_TERM		ASM_CONST(0x0000000000000080)
+#define FW_FEATURE_PERF		ASM_CONST(0x0000000000000100)
+#define FW_FEATURE_DUMP		ASM_CONST(0x0000000000000200)
+#define FW_FEATURE_INTERRUPT	ASM_CONST(0x0000000000000400)
+#define FW_FEATURE_MIGRATE	ASM_CONST(0x0000000000000800)
+#define FW_FEATURE_PERFMON	ASM_CONST(0x0000000000001000)
+#define FW_FEATURE_CRQ		ASM_CONST(0x0000000000002000)
+#define FW_FEATURE_VIO		ASM_CONST(0x0000000000004000)
+#define FW_FEATURE_RDMA		ASM_CONST(0x0000000000008000)
+#define FW_FEATURE_LLAN		ASM_CONST(0x0000000000010000)
+#define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000000020000)
+#define FW_FEATURE_XDABR	ASM_CONST(0x0000000000040000)
+#define FW_FEATURE_MULTITCE	ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
+#define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
+#define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
+#define FW_FEATURE_HPT_RESIZE	ASM_CONST(0x0000000001000000)
+#define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN		ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_XCMO		ASM_CONST(0x0000000008000000)
+#define FW_FEATURE_OPAL		ASM_CONST(0x0000000010000000)
+#define FW_FEATURE_SET_MODE	ASM_CONST(0x0000000040000000)
+#define FW_FEATURE_BEST_ENERGY	ASM_CONST(0x0000000080000000)
+#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_PRRN		ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_DRMEM_V2	ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO	ASM_CONST(0x0000000800000000)
+
+#ifndef __ASSEMBLY__
+
+enum {
+#ifdef CONFIG_PPC64
+	FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+		FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+		FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+		FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+		FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
+		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+		FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
+		FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+		FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
+		FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
+		FW_FEATURE_DRC_INFO,
+	FW_FEATURE_PSERIES_ALWAYS = 0,
+	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
+	FW_FEATURE_POWERNV_ALWAYS = 0,
+	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+	FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+	FW_FEATURE_NATIVE_POSSIBLE = 0,
+	FW_FEATURE_NATIVE_ALWAYS = 0,
+	FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+		FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_POWERNV
+		FW_FEATURE_POWERNV_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_PS3
+		FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_NATIVE
+		FW_FEATURE_NATIVE_ALWAYS |
+#endif
+		0,
+	FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+		FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_POWERNV
+		FW_FEATURE_POWERNV_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_PS3
+		FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_NATIVE
+		FW_FEATURE_NATIVE_ALWAYS &
+#endif
+		FW_FEATURE_POSSIBLE,
+
+#else /* CONFIG_PPC64 */
+	FW_FEATURE_POSSIBLE = 0,
+	FW_FEATURE_ALWAYS = 0,
+#endif
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long	powerpc_firmware_features;
+
+#define firmware_has_feature(feature)					\
+	((FW_FEATURE_ALWAYS & (feature)) ||				\
+		(FW_FEATURE_POSSIBLE & powerpc_firmware_features & (feature)))
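+
+/*
+ * Typical (illustrative) use:
+ *
+ *	if (firmware_has_feature(FW_FEATURE_LPAR))
+ *		... use hypervisor services ...
+ *
+ * Since FW_FEATURE_ALWAYS and FW_FEATURE_POSSIBLE are compile-time
+ * constants, tests on bits outside FW_FEATURE_POSSIBLE fold to false, and
+ * bits inside FW_FEATURE_ALWAYS fold to true, with no runtime check.
+ */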
+
+extern void system_reset_fwnmi(void);
+extern void machine_check_fwnmi(void);
+
+/* This is true if we are using the firmware NMI handler (typically LPAR) */
+extern int fwnmi_active;
+
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+
+#ifdef CONFIG_PPC_PSERIES
+void pseries_probe_fw_features(void);
+#else
+static inline void pseries_probe_fw_features(void) { };
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
new file mode 100644
index 0000000..41cc15c
--- /dev/null
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -0,0 +1,79 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Copyright 2008 Freescale Semiconductor Inc.
+ *   Port to powerpc added by Kumar Gala
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * This also lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx, phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+	FIX_HOLE,
+	/* reserve the top 128K for early debugging purposes */
+	FIX_EARLY_DEBUG_TOP = FIX_HOLE,
+	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1,
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_PPC_8xx
+	/* For IMMR we need an aligned 512K area */
+#define FIX_IMMR_SIZE	(512 * 1024 / PAGE_SIZE)
+	FIX_IMMR_START,
+	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+		       FIX_IMMR_SIZE,
+#endif
+	/* FIX_PCIE_MCFG, */
+	__end_of_fixed_addresses
+};
+
+#define __FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
+
+#include <asm-generic/fixmap.h>
+
+static inline void __set_fixmap(enum fixed_addresses idx,
+				phys_addr_t phys, pgprot_t flags)
+{
+	map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags));
+}
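+
+/*
+ * Illustrative usage (uart_phys is a hypothetical address; set_fixmap()
+ * and fix_to_virt() come from asm-generic/fixmap.h included above):
+ *
+ *	set_fixmap(FIX_EARLY_DEBUG_BASE, uart_phys);
+ *	void __iomem *uart = (void __iomem *)fix_to_virt(FIX_EARLY_DEBUG_BASE);
+ */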
+
+#endif /* !__ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
new file mode 100644
index 0000000..167c44b
--- /dev/null
+++ b/arch/powerpc/include/asm/floppy.h
@@ -0,0 +1,211 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_POWERPC_FLOPPY_H
+#define __ASM_POWERPC_FLOPPY_H
+#ifdef __KERNEL__
+
+#include <asm/machdep.h>
+
+#define fd_inb(port)		inb_p(port)
+#define fd_outb(value,port)	outb_p(value,port)
+
+#define fd_enable_dma()         enable_dma(FLOPPY_DMA)
+#define fd_disable_dma()	 fd_ops->_disable_dma(FLOPPY_DMA)
+#define fd_free_dma()           fd_ops->_free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff()       clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode)   set_dma_mode(FLOPPY_DMA, mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
+#define fd_get_dma_residue()    fd_ops->_get_dma_residue(FLOPPY_DMA)
+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
+#define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
+
+#include <linux/pci.h>
+#include <asm/ppc-pci.h>	/* for isa_bridge_pcidev */
+
+#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io)
+
+static int fd_request_dma(void);
+
+struct fd_dma_ops {
+	void (*_disable_dma)(unsigned int dmanr);
+	void (*_free_dma)(unsigned int dmanr);
+	int (*_get_dma_residue)(unsigned int dummy);
+	int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+};
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_vdma;
+static struct fd_dma_ops *fd_ops;
+
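+/*
+ * Note on the handler below: in "virtual DMA" mode the controller runs
+ * without an ISA DMA channel and the data phase is serviced here with port
+ * I/O, one byte per iteration.  Port+4 is the FDC main status register:
+ * 0x80 (RQM) and 0x20 (non-DMA execution) must both be set to keep
+ * transferring; once 0x20 drops, the command has completed and the normal
+ * floppy_interrupt() path runs.
+ */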
+static irqreturn_t floppy_hardint(int irq, void *dev_id)
+{
+	unsigned char st;
+	int lcount;
+	char *lptr;
+
+	if (!doing_vdma)
+		return floppy_interrupt(irq, dev_id);
+
+
+	st = 1;
+	for (lcount=virtual_dma_count, lptr=virtual_dma_addr;
+	     lcount; lcount--, lptr++) {
+		st=inb(virtual_dma_port+4) & 0xa0 ;
+		if (st != 0xa0)
+			break;
+		if (virtual_dma_mode)
+			outb_p(*lptr, virtual_dma_port+5);
+		else
+			*lptr = inb_p(virtual_dma_port+5);
+	}
+	virtual_dma_count = lcount;
+	virtual_dma_addr = lptr;
+	st = inb(virtual_dma_port+4);
+
+	if (st == 0x20)
+		return IRQ_HANDLED;
+	if (!(st & 0x20)) {
+		virtual_dma_residue += virtual_dma_count;
+		virtual_dma_count=0;
+		doing_vdma = 0;
+		floppy_interrupt(irq, dev_id);
+		return IRQ_HANDLED;
+	}
+	return IRQ_HANDLED;
+}
+
+static void vdma_disable_dma(unsigned int dummy)
+{
+	doing_vdma = 0;
+	virtual_dma_residue += virtual_dma_count;
+	virtual_dma_count=0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+	return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+	if (can_use_virtual_dma)
+		return request_irq(FLOPPY_IRQ, floppy_hardint,
+				   0, "floppy", NULL);
+	else
+		return request_irq(FLOPPY_IRQ, floppy_interrupt,
+				   0, "floppy", NULL);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+	doing_vdma = 1;
+	virtual_dma_port = io;
+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
+	virtual_dma_addr = addr;
+	virtual_dma_count = size;
+	virtual_dma_residue = 0;
+	return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+	static unsigned long prev_size;
+	static dma_addr_t bus_addr = 0;
+	static char *prev_addr;
+	static int prev_dir;
+	int dir;
+
+	doing_vdma = 0;
+	dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+
+	if (bus_addr 
+	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
+		/* different from last time -- unmap prev */
+		pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
+		bus_addr = 0;
+	}
+
+	if (!bus_addr)	/* need to map it */
+		bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
+
+	/* remember this one as prev */
+	prev_addr = addr;
+	prev_size = size;
+	prev_dir = dir;
+
+	fd_clear_dma_ff();
+	fd_set_dma_mode(mode);
+	set_dma_addr(FLOPPY_DMA, bus_addr);
+	fd_set_dma_count(size);
+	virtual_dma_port = io;
+	fd_enable_dma();
+
+	return 0;
+}
+
+static struct fd_dma_ops real_dma_ops =
+{
+	._disable_dma = disable_dma,
+	._free_dma = free_dma,
+	._get_dma_residue = get_dma_residue,
+	._dma_setup = hard_dma_setup
+};
+
+static struct fd_dma_ops virt_dma_ops =
+{
+	._disable_dma = vdma_disable_dma,
+	._free_dma = vdma_nop,
+	._get_dma_residue = vdma_get_dma_residue,
+	._dma_setup = vdma_dma_setup
+};
+
+static int fd_request_dma(void)
+{
+	if (can_use_virtual_dma & 1) {
+		fd_ops = &virt_dma_ops;
+		return 0;
+	}
+	else {
+		fd_ops = &real_dma_ops;
+		return request_dma(FLOPPY_DMA, "floppy");
+	}
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Again, the CMOS information is not available
+ */
+#define FLOPPY0_TYPE 6
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 2			/* Don't change this! */
+#define N_DRIVE 8
+
+/*
+ * The PowerPC has no problems with floppy DMA crossing 64k borders.
+ */
+#define CROSS_64KB(a,s)	(0)
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
new file mode 100644
index 0000000..8def56e
--- /dev/null
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -0,0 +1,49 @@
+/*
+ * Platform information definitions.
+ *
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_PD_H
+#define FS_PD_H
+#include <sysdev/fsl_soc.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+
+#if defined(CONFIG_8260)
+#include <asm/mpc8260.h>
+#endif
+
+#define cpm2_map(member) (&cpm2_immr->member)
+#define cpm2_map_size(member, size) (&cpm2_immr->member)
+#define cpm2_unmap(addr) do {} while(0)
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#include <asm/8xx_immap.h>
+
+extern immap_t __iomem *mpc8xx_immr;
+
+#define immr_map(member) (&mpc8xx_immr->member)
+#define immr_map_size(member, size) (&mpc8xx_immr->member)
+#define immr_unmap(addr) do {} while (0)
+#endif
+
+static inline int uart_baudrate(void)
+{
+	return get_baudrate();
+}
+
+static inline int uart_clock(void)
+{
+	return ppc_proc_freq;
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
new file mode 100644
index 0000000..2af2bdc
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * Cache SRAM handling for QorIQ platform
+ *
+ * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
+ *
+ * This file is derived from the original work done
+ * by Sylvain Munaut for the Bestcomm SRAM allocator.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
+#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
+
+#include <asm/rheap.h>
+#include <linux/spinlock.h>
+
+/*
+ * Cache-SRAM
+ */
+
+struct mpc85xx_cache_sram {
+	phys_addr_t base_phys;
+	void *base_virt;
+	unsigned int size;
+	rh_info_t *rh;
+	spinlock_t lock;
+};
+
+extern void mpc85xx_cache_sram_free(void *ptr);
+extern void *mpc85xx_cache_sram_alloc(unsigned int size,
+				  phys_addr_t *phys, unsigned int align);
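+
+/*
+ * Illustrative usage (a sketch; failure is assumed to be reported by a
+ * NULL return):
+ *
+ *	phys_addr_t phys;
+ *	void *buf = mpc85xx_cache_sram_alloc(4096, &phys, 4096);
+ *
+ *	if (buf) {
+ *		... CPU uses buf, devices DMA to/from phys ...
+ *		mpc85xx_cache_sram_free(buf);
+ *	}
+ */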
+
+#endif /* __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ */
diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h
new file mode 100644
index 0000000..3b05808
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_gtm.h
@@ -0,0 +1,47 @@
+/*
+ * Freescale General-purpose Timers Module
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_FSL_GTM_H
+#define __ASM_FSL_GTM_H
+
+#include <linux/types.h>
+
+struct gtm;
+
+struct gtm_timer {
+	unsigned int irq;
+
+	struct gtm *gtm;
+	bool requested;
+	u8 __iomem *gtcfr;
+	__be16 __iomem *gtmdr;
+	__be16 __iomem *gtpsr;
+	__be16 __iomem *gtcnr;
+	__be16 __iomem *gtrfr;
+	__be16 __iomem *gtevr;
+};
+
+extern struct gtm_timer *gtm_get_timer16(void);
+extern struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm,
+						  unsigned int timer);
+extern void gtm_put_timer16(struct gtm_timer *tmr);
+extern int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec,
+			     bool reload);
+extern int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec,
+				 bool reload);
+extern void gtm_stop_timer16(struct gtm_timer *tmr);
+extern void gtm_ack_timer16(struct gtm_timer *tmr, u16 events);
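+
+/*
+ * Illustrative usage (a sketch): claim any free 16-bit timer and program a
+ * 10ms auto-reloading tick, acknowledging events from the irq handler with
+ * gtm_ack_timer16():
+ *
+ *	struct gtm_timer *tmr = gtm_get_timer16();
+ *
+ *	if (!IS_ERR_OR_NULL(tmr))
+ *		gtm_set_timer16(tmr, 10000, true);
+ *
+ * where 10000 is the period in microseconds and "true" requests reload.
+ */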
+
+#endif /* __ASM_FSL_GTM_H */
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 0000000..b889d13
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
+/*
+ * Freescale hypervisor call interface
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_HCALLS_H
+#define _FSL_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+#include <asm/epapr_hcalls.h>
+
+#define FH_API_VERSION			1
+
+#define FH_ERR_GET_INFO			1
+#define FH_PARTITION_GET_DTPROP		2
+#define FH_PARTITION_SET_DTPROP		3
+#define FH_PARTITION_RESTART		4
+#define FH_PARTITION_GET_STATUS		5
+#define FH_PARTITION_START		6
+#define FH_PARTITION_STOP		7
+#define FH_PARTITION_MEMCPY		8
+#define FH_DMA_ENABLE			9
+#define FH_DMA_DISABLE			10
+#define FH_SEND_NMI			11
+#define FH_VMPIC_GET_MSIR		12
+#define FH_SYSTEM_RESET			13
+#define FH_GET_CORE_STATE		14
+#define FH_ENTER_NAP			15
+#define FH_EXIT_NAP			16
+#define FH_CLAIM_DEVICE			17
+#define FH_PARTITION_STOP_DMA		18
+
+/* vendor ID: Freescale Semiconductor */
+#define FH_HCALL_TOKEN(num)		_EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only.  Registers that are
+ * initialized before making the hypercall are input/output.  All
+ * input/output registers are represented with "+r".  Output-only registers
+ * are represented with "=r".  Do not specify any unused registers.  The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * fh_send_nmi - send NMI to virtual cpu(s).
+ * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
+ *
+ * Returns 0 for success, or EINVAL for invalid vcpu_mask.
+ */
+static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
+	r3 = vcpu_mask;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
+#define FH_DTPROP_MAX_PATHLEN 4096
+#define FH_DTPROP_MAX_PROPLEN 32768
+
+/**
+ * fh_partition_get_dtprop - get a property from a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value buffer
+ * @propvalue_len: length of buffer on entry, length of property on return
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_get_dtprop(int handle,
+						   uint64_t dtpath_addr,
+						   uint64_t propname_addr,
+						   uint64_t propvalue_addr,
+						   uint32_t *propvalue_len)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r7 __asm__("r7");
+	register uintptr_t r8 __asm__("r8");
+	register uintptr_t r9 __asm__("r9");
+	register uintptr_t r10 __asm__("r10");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
+	r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+	r4 = dtpath_addr >> 32;
+	r6 = propname_addr >> 32;
+	r8 = propvalue_addr >> 32;
+#else
+	r4 = 0;
+	r6 = 0;
+	r8 = 0;
+#endif
+	r5 = (uint32_t)dtpath_addr;
+	r7 = (uint32_t)propname_addr;
+	r9 = (uint32_t)propvalue_addr;
+	r10 = *propvalue_len;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11),
+		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+		  "+r" (r8), "+r" (r9), "+r" (r10)
+		: : EV_HCALL_CLOBBERS8
+	);
+
+	*propvalue_len = r4;
+	return r3;
+}
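+
+/*
+ * Illustrative usage sketch (not part of the original header): read the
+ * "label" property of a partition's root node.  The hypervisor takes guest
+ * physical addresses, hence the virt_to_phys() conversions; "handle", the
+ * buffer size and the property name are assumed example values.
+ *
+ *	static char path[] = "/";
+ *	static char prop[] = "label";
+ *	static char buf[64];
+ *	uint32_t len = sizeof(buf);
+ *
+ *	if (!fh_partition_get_dtprop(handle, virt_to_phys(path),
+ *				     virt_to_phys(prop), virt_to_phys(buf),
+ *				     &len))
+ *		pr_info("label: %.*s\n", len, buf);
+ */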
+
+/**
+ * fh_partition_set_dtprop - set a property in a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value
+ * @propvalue_len: length of property
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_set_dtprop(int handle,
+						   uint64_t dtpath_addr,
+						   uint64_t propname_addr,
+						   uint64_t propvalue_addr,
+						   uint32_t propvalue_len)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r8 __asm__("r8");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r7 __asm__("r7");
+	register uintptr_t r9 __asm__("r9");
+	register uintptr_t r10 __asm__("r10");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
+	r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+	r4 = dtpath_addr >> 32;
+	r6 = propname_addr >> 32;
+	r8 = propvalue_addr >> 32;
+#else
+	r4 = 0;
+	r6 = 0;
+	r8 = 0;
+#endif
+	r5 = (uint32_t)dtpath_addr;
+	r7 = (uint32_t)propname_addr;
+	r9 = (uint32_t)propvalue_addr;
+	r10 = propvalue_len;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11),
+		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+		  "+r" (r8), "+r" (r9), "+r" (r10)
+		: : EV_HCALL_CLOBBERS8
+	);
+
+	return r3;
+}
+
+/**
+ * fh_partition_restart - reboot the current partition
+ * @partition: partition ID
+ *
+ * Returns an error code if reboot failed.  Does not return if it succeeds.
+ */
+static inline unsigned int fh_partition_restart(unsigned int partition)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
+	r3 = partition;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+#define FH_PARTITION_STOPPED	0
+#define FH_PARTITION_RUNNING	1
+#define FH_PARTITION_STARTING	2
+#define FH_PARTITION_STOPPING	3
+#define FH_PARTITION_PAUSING	4
+#define FH_PARTITION_PAUSED	5
+#define FH_PARTITION_RESUMING	6
+
+/**
+ * fh_partition_get_status - gets the status of a partition
+ * @partition: partition ID
+ * @status: returned status code
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_get_status(unsigned int partition,
+	unsigned int *status)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
+	r3 = partition;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	*status = r4;
+
+	return r3;
+}
+
+/**
+ * fh_partition_start - boots and starts execution of the specified partition
+ * @partition: partition ID
+ * @entry_point: guest physical address to start execution
+ *
+ * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
+ * time, guest physical addresses are the same as guest virtual addresses.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_start(unsigned int partition,
+	uint32_t entry_point, int load)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
+	r3 = partition;
+	r4 = entry_point;
+	r5 = load;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
+		: : EV_HCALL_CLOBBERS3
+	);
+
+	return r3;
+}
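+
+/*
+ * Illustrative usage sketch (not part of the original header): boot a
+ * stopped partition.  The partition ID (3), entry point (0x0) and load
+ * flag are arbitrary example values; the load flag's meaning is defined
+ * by the hypervisor ABI.
+ *
+ *	unsigned int status;
+ *
+ *	if (!fh_partition_get_status(3, &status) &&
+ *	    status == FH_PARTITION_STOPPED)
+ *		fh_partition_start(3, 0x0, 1);
+ */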
+
+/**
+ * fh_partition_stop - stops another partition
+ * @partition: partition ID
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop(unsigned int partition)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
+	r3 = partition;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/**
+ * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
+ * @source: guest physical address to copy from
+ * @target: guest physical address to copy to
+ * @size: number of bytes to copy
+ * @reserved: reserved, must be zero
+ *
+ * The scatter/gather list for fh_partition_memcpy() is an array of these
+ * structures.  The array must be guest physically contiguous.
+ *
+ * This structure must be aligned on a 32-byte boundary, so that no single
+ * structure can span two pages.
+ */
+struct fh_sg_list {
+	uint64_t source;   /**< guest physical address to copy from */
+	uint64_t target;   /**< guest physical address to copy to */
+	uint64_t size;     /**< number of bytes to copy */
+	uint64_t reserved; /**< reserved, must be zero */
+} __attribute__ ((aligned(32)));
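+
+/*
+ * Illustrative usage sketch (not part of the original header): build a
+ * one-entry scatter/gather list and copy one page between partitions.
+ * src_phys/dst_phys and the partition IDs are assumed to exist.
+ *
+ *	static struct fh_sg_list sg;	// 32-byte aligned by the attribute
+ *
+ *	sg.source = src_phys;
+ *	sg.target = dst_phys;
+ *	sg.size = 4096;
+ *	sg.reserved = 0;
+ *	fh_partition_memcpy(src_part, dst_part, virt_to_phys(&sg), 1);
+ */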
+
+/**
+ * fh_partition_memcpy - copies data from one guest to another
+ * @source: the ID of the partition to copy from
+ * @target: the ID of the partition to copy to
+ * @sg_list: guest physical address of an array of &fh_sg_list structures
+ * @count: the number of entries in @sg_list
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_memcpy(unsigned int source,
+	unsigned int target, phys_addr_t sg_list, unsigned int count)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r7 __asm__("r7");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
+	r3 = source;
+	r4 = target;
+	r5 = (uint32_t) sg_list;
+
+#ifdef CONFIG_PHYS_64BIT
+	r6 = sg_list >> 32;
+#else
+	r6 = 0;
+#endif
+	r7 = count;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11),
+		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
+		: : EV_HCALL_CLOBBERS5
+	);
+
+	return r3;
+}
+
+/**
+ * fh_dma_enable - enable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to enable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_enable(unsigned int liodn)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
+	r3 = liodn;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/**
+ * fh_dma_disable - disable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to disable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_disable(unsigned int liodn)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
+	r3 = liodn;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+
+/**
+ * fh_vmpic_get_msir - returns the MPIC-MSI register value
+ * @interrupt: the interrupt number
+ * @msir_val: returned MPIC-MSI register value
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
+	unsigned int *msir_val)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
+	r3 = interrupt;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "=r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	*msir_val = r4;
+
+	return r3;
+}
+
+/**
+ * fh_system_reset - reset the system
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_system_reset(void)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "=r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+
+/**
+ * fh_err_get_info - get platform error information
+ * @queue: 0 for the guest error event queue,
+ *         1 for the global error event queue
+ * @bufsize: on entry, size of the buffer; on return, length of the
+ *           platform error data placed in it
+ * @addr_hi: upper 32 bits of the guest physical address of the buffer
+ * @addr_lo: lower 32 bits of the guest physical address of the buffer
+ * @peek: non-zero to read the event without removing it from the queue
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
+	uint32_t addr_hi, uint32_t addr_lo, int peek)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+	register uintptr_t r5 __asm__("r5");
+	register uintptr_t r6 __asm__("r6");
+	register uintptr_t r7 __asm__("r7");
+
+	r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
+	r3 = queue;
+	r4 = *bufsize;
+	r5 = addr_hi;
+	r6 = addr_lo;
+	r7 = peek;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
+		  "+r" (r7)
+		: : EV_HCALL_CLOBBERS5
+	);
+
+	*bufsize = r4;
+
+	return r3;
+}
+
+
+#define FH_VCPU_RUN	0
+#define FH_VCPU_IDLE	1
+#define FH_VCPU_NAP	2
+
+/**
+ * fh_get_core_state - get the state of a vcpu
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ * @state: the current state of the vcpu, see FH_VCPU_*
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_get_core_state(unsigned int handle,
+	unsigned int vcpu, unsigned int *state)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
+	r3 = handle;
+	r4 = vcpu;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	*state = r4;
+	return r3;
+}
+
+/**
+ * fh_enter_nap - enter nap on a vcpu
+ *
+ * Note that though the API supports entering nap on a vcpu other
+ * than the caller, this may not be implemented and may return EINVAL.
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
+	r3 = handle;
+	r4 = vcpu;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	return r3;
+}
+
+/**
+ * fh_exit_nap - exit nap on a vcpu
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+	register uintptr_t r4 __asm__("r4");
+
+	r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
+	r3 = handle;
+	r4 = vcpu;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3), "+r" (r4)
+		: : EV_HCALL_CLOBBERS2
+	);
+
+	return r3;
+}
+
+/**
+ * fh_claim_device - claim a "claimable" shared device
+ * @handle: fsl,hv-device-handle of node to claim
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_claim_device(unsigned int handle)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
+	r3 = handle;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+
+/**
+ * fh_partition_stop_dma - run deferred DMA disabling on a partition's devices
+ * @handle: partition (must be stopped) whose DMA is to be disabled
+ *
+ * This applies to devices which a partition owns either privately,
+ * or which are claimable and still actively owned by that partition,
+ * and which do not have the no-dma-disable property.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop_dma(unsigned int handle)
+{
+	register uintptr_t r11 __asm__("r11");
+	register uintptr_t r3 __asm__("r3");
+
+	r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
+	r3 = handle;
+
+	asm volatile("bl	epapr_hypercall_start"
+		: "+r" (r11), "+r" (r3)
+		: : EV_HCALL_CLOBBERS1
+	);
+
+	return r3;
+}
+#endif /* _FSL_HCALLS_H */
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
new file mode 100644
index 0000000..c7240a0
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -0,0 +1,309 @@
+/* Freescale Local Bus Controller
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ *          Scott Wood <scottwood@freescale.com>
+ *          Jack Lan <jack.lan@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_FSL_LBC_H
+#define __ASM_FSL_LBC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+
+struct fsl_lbc_bank {
+	__be32 br;             /**< Base Register  */
+#define BR_BA           0xFFFF8000
+#define BR_BA_SHIFT             15
+#define BR_PS           0x00001800
+#define BR_PS_SHIFT             11
+#define BR_PS_8         0x00000800  /* Port Size 8 bit */
+#define BR_PS_16        0x00001000  /* Port Size 16 bit */
+#define BR_PS_32        0x00001800  /* Port Size 32 bit */
+#define BR_DECC         0x00000600
+#define BR_DECC_SHIFT            9
+#define BR_DECC_OFF     0x00000000  /* HW ECC checking and generation off */
+#define BR_DECC_CHK     0x00000200  /* HW ECC checking on, generation off */
+#define BR_DECC_CHK_GEN 0x00000400  /* HW ECC checking and generation on */
+#define BR_WP           0x00000100
+#define BR_WP_SHIFT              8
+#define BR_MSEL         0x000000E0
+#define BR_MSEL_SHIFT            5
+#define BR_MS_GPCM      0x00000000  /* GPCM */
+#define BR_MS_FCM       0x00000020  /* FCM */
+#define BR_MS_SDRAM     0x00000060  /* SDRAM */
+#define BR_MS_UPMA      0x00000080  /* UPMA */
+#define BR_MS_UPMB      0x000000A0  /* UPMB */
+#define BR_MS_UPMC      0x000000C0  /* UPMC */
+#define BR_V            0x00000001
+#define BR_V_SHIFT               0
+#define BR_RES          ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
+
+	__be32 or;             /**< Option Register  */
+#define OR0 0x5004
+#define OR1 0x500C
+#define OR2 0x5014
+#define OR3 0x501C
+#define OR4 0x5024
+#define OR5 0x502C
+#define OR6 0x5034
+#define OR7 0x503C
+
+#define OR_FCM_AM               0xFFFF8000
+#define OR_FCM_AM_SHIFT                 15
+#define OR_FCM_BCTLD            0x00001000
+#define OR_FCM_BCTLD_SHIFT              12
+#define OR_FCM_PGS              0x00000400
+#define OR_FCM_PGS_SHIFT                10
+#define OR_FCM_CSCT             0x00000200
+#define OR_FCM_CSCT_SHIFT                9
+#define OR_FCM_CST              0x00000100
+#define OR_FCM_CST_SHIFT                 8
+#define OR_FCM_CHT              0x00000080
+#define OR_FCM_CHT_SHIFT                 7
+#define OR_FCM_SCY              0x00000070
+#define OR_FCM_SCY_SHIFT                 4
+#define OR_FCM_SCY_1            0x00000010
+#define OR_FCM_SCY_2            0x00000020
+#define OR_FCM_SCY_3            0x00000030
+#define OR_FCM_SCY_4            0x00000040
+#define OR_FCM_SCY_5            0x00000050
+#define OR_FCM_SCY_6            0x00000060
+#define OR_FCM_SCY_7            0x00000070
+#define OR_FCM_RST              0x00000008
+#define OR_FCM_RST_SHIFT                 3
+#define OR_FCM_TRLX             0x00000004
+#define OR_FCM_TRLX_SHIFT                2
+#define OR_FCM_EHTR             0x00000002
+#define OR_FCM_EHTR_SHIFT                1
+
+#define OR_GPCM_AM		0xFFFF8000
+#define OR_GPCM_AM_SHIFT		15
+};
+
+struct fsl_lbc_regs {
+	struct fsl_lbc_bank bank[12];
+	u8 res0[0x8];
+	__be32 mar;             /**< UPM Address Register */
+	u8 res1[0x4];
+	__be32 mamr;            /**< UPMA Mode Register */
+#define MxMR_OP_NO	(0 << 28) /**< normal operation */
+#define MxMR_OP_WA	(1 << 28) /**< write array */
+#define MxMR_OP_RA	(2 << 28) /**< read array */
+#define MxMR_OP_RP	(3 << 28) /**< run pattern */
+#define MxMR_MAD	0x3f      /**< machine address */
+	__be32 mbmr;            /**< UPMB Mode Register */
+	__be32 mcmr;            /**< UPMC Mode Register */
+	u8 res2[0x8];
+	__be32 mrtpr;           /**< Memory Refresh Timer Prescaler Register */
+	__be32 mdr;             /**< UPM Data Register */
+	u8 res3[0x4];
+	__be32 lsor;            /**< Special Operation Initiation Register */
+	__be32 lsdmr;           /**< SDRAM Mode Register */
+	u8 res4[0x8];
+	__be32 lurt;            /**< UPM Refresh Timer */
+	__be32 lsrt;            /**< SDRAM Refresh Timer */
+	u8 res5[0x8];
+	__be32 ltesr;           /**< Transfer Error Status Register */
+#define LTESR_BM   0x80000000
+#define LTESR_FCT  0x40000000
+#define LTESR_PAR  0x20000000
+#define LTESR_WP   0x04000000
+#define LTESR_ATMW 0x00800000
+#define LTESR_ATMR 0x00400000
+#define LTESR_CS   0x00080000
+#define LTESR_UPM  0x00000002
+#define LTESR_CC   0x00000001
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+#define LTESR_MASK      (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \
+			 | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \
+			 | LTESR_CC)
+#define LTESR_CLEAR	0xFFFFFFFF
+#define LTECCR_CLEAR	0xFFFFFFFF
+#define LTESR_STATUS	LTESR_MASK
+#define LTEIR_ENABLE	LTESR_MASK
+#define LTEDR_ENABLE	0x00000000
+	__be32 ltedr;           /**< Transfer Error Disable Register */
+	__be32 lteir;           /**< Transfer Error Interrupt Register */
+	__be32 lteatr;          /**< Transfer Error Attributes Register */
+	__be32 ltear;           /**< Transfer Error Address Register */
+	__be32 lteccr;          /**< Transfer Error ECC Register */
+	u8 res6[0x8];
+	__be32 lbcr;            /**< Configuration Register */
+#define LBCR_LDIS  0x80000000
+#define LBCR_LDIS_SHIFT    31
+#define LBCR_BCTLC 0x00C00000
+#define LBCR_BCTLC_SHIFT   22
+#define LBCR_AHD   0x00200000
+#define LBCR_LPBSE 0x00020000
+#define LBCR_LPBSE_SHIFT   17
+#define LBCR_EPAR  0x00010000
+#define LBCR_EPAR_SHIFT    16
+#define LBCR_BMT   0x0000FF00
+#define LBCR_BMT_SHIFT      8
+#define LBCR_BMTPS 0x0000000F
+#define LBCR_BMTPS_SHIFT    0
+#define LBCR_INIT  0x00040000
+	__be32 lcrr;            /**< Clock Ratio Register */
+#define LCRR_DBYP    0x80000000
+#define LCRR_DBYP_SHIFT      31
+#define LCRR_BUFCMDC 0x30000000
+#define LCRR_BUFCMDC_SHIFT   28
+#define LCRR_ECL     0x03000000
+#define LCRR_ECL_SHIFT       24
+#define LCRR_EADC    0x00030000
+#define LCRR_EADC_SHIFT      16
+#define LCRR_CLKDIV  0x0000000F
+#define LCRR_CLKDIV_SHIFT     0
+	u8 res7[0x8];
+	__be32 fmr;             /**< Flash Mode Register */
+#define FMR_CWTO     0x0000F000
+#define FMR_CWTO_SHIFT       12
+#define FMR_BOOT     0x00000800
+#define FMR_ECCM     0x00000100
+#define FMR_AL       0x00000030
+#define FMR_AL_SHIFT          4
+#define FMR_OP       0x00000003
+#define FMR_OP_SHIFT          0
+	__be32 fir;             /**< Flash Instruction Register */
+#define FIR_OP0      0xF0000000
+#define FIR_OP0_SHIFT        28
+#define FIR_OP1      0x0F000000
+#define FIR_OP1_SHIFT        24
+#define FIR_OP2      0x00F00000
+#define FIR_OP2_SHIFT        20
+#define FIR_OP3      0x000F0000
+#define FIR_OP3_SHIFT        16
+#define FIR_OP4      0x0000F000
+#define FIR_OP4_SHIFT        12
+#define FIR_OP5      0x00000F00
+#define FIR_OP5_SHIFT         8
+#define FIR_OP6      0x000000F0
+#define FIR_OP6_SHIFT         4
+#define FIR_OP7      0x0000000F
+#define FIR_OP7_SHIFT         0
+#define FIR_OP_NOP   0x0	/* No operation and end of sequence */
+#define FIR_OP_CA    0x1        /* Issue current column address */
+#define FIR_OP_PA    0x2        /* Issue current block+page address */
+#define FIR_OP_UA    0x3        /* Issue user defined address */
+#define FIR_OP_CM0   0x4        /* Issue command from FCR[CMD0] */
+#define FIR_OP_CM1   0x5        /* Issue command from FCR[CMD1] */
+#define FIR_OP_CM2   0x6        /* Issue command from FCR[CMD2] */
+#define FIR_OP_CM3   0x7        /* Issue command from FCR[CMD3] */
+#define FIR_OP_WB    0x8        /* Write FBCR bytes from FCM buffer */
+#define FIR_OP_WS    0x9        /* Write 1 or 2 bytes from MDR[AS] */
+#define FIR_OP_RB    0xA        /* Read FBCR bytes to FCM buffer */
+#define FIR_OP_RS    0xB        /* Read 1 or 2 bytes to MDR[AS] */
+#define FIR_OP_CW0   0xC        /* Wait then issue FCR[CMD0] */
+#define FIR_OP_CW1   0xD        /* Wait then issue FCR[CMD1] */
+#define FIR_OP_RBW   0xE        /* Wait then read FBCR bytes */
+#define FIR_OP_RSW   0xE        /* Wait then read 1 or 2 bytes */
+	__be32 fcr;             /**< Flash Command Register */
+#define FCR_CMD0     0xFF000000
+#define FCR_CMD0_SHIFT       24
+#define FCR_CMD1     0x00FF0000
+#define FCR_CMD1_SHIFT       16
+#define FCR_CMD2     0x0000FF00
+#define FCR_CMD2_SHIFT        8
+#define FCR_CMD3     0x000000FF
+#define FCR_CMD3_SHIFT        0
+	__be32 fbar;            /**< Flash Block Address Register */
+#define FBAR_BLK     0x00FFFFFF
+	__be32 fpar;            /**< Flash Page Address Register */
+#define FPAR_SP_PI   0x00007C00
+#define FPAR_SP_PI_SHIFT     10
+#define FPAR_SP_MS   0x00000200
+#define FPAR_SP_CI   0x000001FF
+#define FPAR_SP_CI_SHIFT      0
+#define FPAR_LP_PI   0x0003F000
+#define FPAR_LP_PI_SHIFT     12
+#define FPAR_LP_MS   0x00000800
+#define FPAR_LP_CI   0x000007FF
+#define FPAR_LP_CI_SHIFT      0
+	__be32 fbcr;            /**< Flash Byte Count Register */
+#define FBCR_BC      0x00000FFF
+};
+
+/*
+ * FSL UPM routines
+ */
+struct fsl_upm {
+	__be32 __iomem *mxmr;
+	int width;
+};
+
+extern u32 fsl_lbc_addr(phys_addr_t addr_base);
+extern int fsl_lbc_find(phys_addr_t addr_base);
+extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm);
+
+/**
+ * fsl_upm_start_pattern - start UPM patterns execution
+ * @upm:	pointer to the fsl_upm structure obtained via fsl_upm_find
+ * @pat_offset:	UPM pattern offset for the command to be executed
+ *
+ * This routine programs the UPM so that the next memory access that hits a
+ * UPM-assigned bank will trigger pattern execution, starting at pat_offset.
+ */
+static inline void fsl_upm_start_pattern(struct fsl_upm *upm, u8 pat_offset)
+{
+	clrsetbits_be32(upm->mxmr, MxMR_MAD, MxMR_OP_RP | pat_offset);
+}
+
+/**
+ * fsl_upm_end_pattern - end UPM patterns execution
+ * @upm:	pointer to the fsl_upm structure obtained via fsl_upm_find
+ *
+ * This routine reverts UPM to normal operation mode.
+ */
+static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
+{
+	clrbits32(upm->mxmr, MxMR_OP_RP);
+
+	while (in_be32(upm->mxmr) & MxMR_OP_RP)
+		cpu_relax();
+}
+
+/* Overview of the FSL LBC controller */
+
+struct fsl_lbc_ctrl {
+	/* device info */
+	struct device			*dev;
+	struct fsl_lbc_regs __iomem	*regs;
+	int				irq[2];
+	wait_queue_head_t		irq_wait;
+	spinlock_t			lock;
+	void				*nand;
+
+	/* status read from LTESR by irq handler */
+	unsigned int			irq_status;
+
+#ifdef CONFIG_SUSPEND
+	/* save regs when the system goes into deep sleep */
+	struct fsl_lbc_regs		*saved_regs;
+#endif
+};
+
+extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
+			       u32 mar);
+extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
+
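+/*
+ * Illustrative usage sketch (not part of the original header): run one UPM
+ * pattern against a UPM-controlled bank.  The base address, pattern offset
+ * and io_base are arbitrary example values.
+ *
+ *	struct fsl_upm upm;
+ *
+ *	if (!fsl_upm_find(0xe0000000, &upm)) {
+ *		fsl_upm_start_pattern(&upm, 0x08);
+ *		fsl_upm_run_pattern(&upm, io_base, 0);
+ *		fsl_upm_end_pattern(&upm);
+ *	}
+ */
+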
+#endif /* __ASM_FSL_LBC_H */
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644
index 0000000..38311c9
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_STASH_H
+#define __FSL_PAMU_STASH_H
+
+/* cache stash targets */
+enum pamu_stash_target {
+	PAMU_ATTR_CACHE_L1 = 1,
+	PAMU_ATTR_CACHE_L2,
+	PAMU_ATTR_CACHE_L3,
+};
+
+/*
+ * This attribute allows configuring stashing-specific parameters
+ * in the PAMU hardware.
+ */
+
+struct pamu_stash_attribute {
+	u32	cpu;	/* cpu number */
+	u32	cache;	/* cache to stash to: L1,L2,L3 */
+};
+
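+/*
+ * Illustrative usage sketch (not part of the original header): stash DMA
+ * data into the L1 cache of CPU 0.
+ *
+ *	struct pamu_stash_attribute attr = {
+ *		.cpu	= 0,
+ *		.cache	= PAMU_ATTR_CACHE_L1,
+ *	};
+ */
+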
+#endif  /* __FSL_PAMU_STASH_H */
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 0000000..47df55e
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,51 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+
+#define E500_PM_PH10	1
+#define E500_PM_PH15	2
+#define E500_PM_PH20	3
+#define E500_PM_PH30	4
+#define E500_PM_DOZE	E500_PM_PH10
+#define E500_PM_NAP	E500_PM_PH15
+
+#define PLAT_PM_SLEEP	20
+#define PLAT_PM_LPM20	30
+
+#define FSL_PM_SLEEP		(1 << 0)
+#define FSL_PM_DEEP_SLEEP	(1 << 1)
+
+struct fsl_pm_ops {
+	/* mask pending interrupts to the RCPM from MPIC */
+	void (*irq_mask)(int cpu);
+
+	/* unmask pending interrupts to the RCPM from MPIC */
+	void (*irq_unmask)(int cpu);
+	void (*cpu_enter_state)(int cpu, int state);
+	void (*cpu_exit_state)(int cpu, int state);
+	void (*cpu_up_prepare)(int cpu);
+	void (*cpu_die)(int cpu);
+	int (*plat_enter_sleep)(void);
+	void (*freeze_time_base)(bool freeze);
+
+	/* keep the power of IP blocks during sleep/deep sleep */
+	void (*set_ip_power)(bool enable, u32 mask);
+
+	/* get platform supported power management modes */
+	unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+
+int __init fsl_rcpm_init(void);
+
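+/*
+ * Illustrative usage sketch (not part of the original header): platform
+ * suspend code checks the supported modes and enters sleep through
+ * qoriq_pm_ops.  wakeup_ip_mask is an assumed platform-specific mask.
+ *
+ *	if (qoriq_pm_ops->get_pm_modes() & FSL_PM_DEEP_SLEEP) {
+ *		qoriq_pm_ops->set_ip_power(true, wakeup_ip_mask);
+ *		qoriq_pm_ops->plat_enter_sleep();
+ *	}
+ */
+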
+#endif /* __PPC_FSL_PM_H */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
new file mode 100644
index 0000000..3dfb80b
--- /dev/null
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_FTRACE
+#define _ASM_POWERPC_FTRACE
+
+#include <asm/types.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_ADDR		((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
+
+#ifdef __ASSEMBLY__
+
+/* Based on objdump output from glibc */
+
+#define MCOUNT_SAVE_FRAME			\
+	stwu	r1,-48(r1);			\
+	stw	r3, 12(r1);			\
+	stw	r4, 16(r1);			\
+	stw	r5, 20(r1);			\
+	stw	r6, 24(r1);			\
+	mflr	r3;				\
+	lwz	r4, 52(r1);			\
+	mfcr	r5;				\
+	stw	r7, 28(r1);			\
+	stw	r8, 32(r1);			\
+	stw	r9, 36(r1);			\
+	stw	r10,40(r1);			\
+	stw	r3, 44(r1);			\
+	stw	r5, 8(r1)
+
+#define MCOUNT_RESTORE_FRAME			\
+	lwz	r6, 8(r1);			\
+	lwz	r0, 44(r1);			\
+	lwz	r3, 12(r1);			\
+	mtctr	r0;				\
+	lwz	r4, 16(r1);			\
+	mtcr	r6;				\
+	lwz	r5, 20(r1);			\
+	lwz	r6, 24(r1);			\
+	lwz	r0, 52(r1);			\
+	lwz	r7, 28(r1);			\
+	lwz	r8, 32(r1);			\
+	mtlr	r0;				\
+	lwz	r9, 36(r1);			\
+	lwz	r10,40(r1);			\
+	addi	r1, r1, 48
+
+#else /* !__ASSEMBLY__ */
+extern void _mcount(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* relocation of the mcount call site is the same as the address */
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+	struct module *mod;
+};
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_FTRACE_SYSCALLS
+/*
+ * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
+ * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
+ * those.
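+ *
+ * For example, "sys_fork" should match the "ppc_fork" entry point (under
+ * the ELFv1 ABI the symbol appears as ".ppc_fork").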
+ */
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+#ifdef PPC64_ELF_ABI_v1
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+	/* We need to skip past the initial dot, and the __se_sys alias */
+	return !strcmp(sym + 1, name) ||
+		(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
+		(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
+		(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
+		(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
+}
+#else
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+	return !strcmp(sym, name) ||
+		(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
+		(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
+		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
+}
+#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline void this_cpu_disable_ftrace(void)
+{
+	get_paca()->ftrace_enabled = 0;
+}
+
+static inline void this_cpu_enable_ftrace(void)
+{
+	get_paca()->ftrace_enabled = 1;
+}
+#else /* CONFIG_PPC64 */
+static inline void this_cpu_disable_ftrace(void) { }
+static inline void this_cpu_enable_ftrace(void) { }
+#endif /* CONFIG_PPC64 */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_FTRACE */
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
new file mode 100644
index 0000000..9454277
--- /dev/null
+++ b/arch/powerpc/include/asm/futex.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+#include <asm/synch.h>
+#include <asm/asm-405.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+  __asm__ __volatile ( \
+	PPC_ATOMIC_ENTRY_BARRIER \
+"1:	lwarx	%0,0,%2\n" \
+	insn \
+	PPC405_ERR77(0, %2) \
+"2:	stwcx.	%1,0,%2\n" \
+	"bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
+	"li	%1,0\n" \
+"3:	.section .fixup,\"ax\"\n" \
+"4:	li	%1,%3\n" \
+	"b	3b\n" \
+	".previous\n" \
+	EX_TABLE(1b, 4b) \
+	EX_TABLE(2b, 4b) \
+	: "=&r" (oldval), "=&r" (ret) \
+	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+	: "cr0", "memory")
+
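+/*
+ * For illustration (not part of the original header): with the FUTEX_OP_ADD
+ * insn "add %1,%0,%4", the macro above expands to roughly
+ *
+ *	1:	lwarx	oldval,0,uaddr
+ *		add	ret,oldval,oparg
+ *	2:	stwcx.	ret,0,uaddr
+ *		bne-	1b
+ *
+ * i.e. a load-reserve/store-conditional retry loop, with both labelled
+ * instructions covered by exception-table entries branching to the
+ * -EFAULT fixup.
+ */
+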
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
+{
+	int oldval = 0, ret;
+
+	pagefault_disable();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();
+
+	if (!ret)
+		*oval = oldval;
+
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{
+	int ret = 0;
+	u32 prev;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	__asm__ __volatile__ (
+	PPC_ATOMIC_ENTRY_BARRIER
+"1:	lwarx	%1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
+	cmpw	0,%1,%4\n\
+	bne-	3f\n"
+	PPC405_ERR77(0, %3)
+"2:	stwcx.	%5,0,%3\n\
+	bne-	1b\n"
+	PPC_ATOMIC_EXIT_BARRIER
+"3:	.section .fixup,\"ax\"\n\
+4:	li	%0,%6\n\
+	b	3b\n\
+	.previous\n"
+	EX_TABLE(1b, 4b)
+	EX_TABLE(2b, 4b)
+	: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+	: "cc", "memory");
+
+	*uval = prev;
+	return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/arch/powerpc/include/asm/grackle.h b/arch/powerpc/include/asm/grackle.h
new file mode 100644
index 0000000..7376e3f
--- /dev/null
+++ b/arch/powerpc/include/asm/grackle.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_GRACKLE_H
+#define _ASM_POWERPC_GRACKLE_H
+#ifdef __KERNEL__
+/*
+ * Functions for setting up and using a MPC106 northbridge
+ */
+
+#include <asm/pci-bridge.h>
+
+extern void setup_grackle(struct pci_controller *hose);
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_GRACKLE_H */
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
new file mode 100644
index 0000000..f1e9067
--- /dev/null
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int timer_irqs_event;
+	unsigned int broadcast_irqs_event;
+	unsigned int timer_irqs_others;
+	unsigned int pmu_irqs;
+	unsigned int mce_exceptions;
+	unsigned int spurious_irqs;
+	unsigned int hmi_exceptions;
+	unsigned int sreset_irqs;
+#ifdef CONFIG_PPC_WATCHDOG
+	unsigned int soft_nmi_irqs;
+#endif
+#ifdef CONFIG_PPC_DOORBELL
+	unsigned int doorbell_irqs;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu	arch_irq_stat_cpu
+
+#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
new file mode 100644
index 0000000..a4f9478
--- /dev/null
+++ b/arch/powerpc/include/asm/head-64.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HEAD_64_H
+#define _ASM_POWERPC_HEAD_64_H
+
+#include <asm/cache.h>
+
+#ifdef __ASSEMBLY__
+/*
+ * We can't do CPP stringification and concatenation directly into the section
+ * name for some reason, so these macros can do it for us.
+ */
+.macro define_ftsec name
+	.section ".head.text.\name\()","ax",@progbits
+.endm
+.macro define_data_ftsec name
+	.section ".head.data.\name\()","a",@progbits
+.endm
+.macro use_ftsec name
+	.section ".head.text.\name\()"
+.endm
+
+/*
+ * Fixed (location) sections are used by opening fixed sections and emitting
+ * fixed section entries into them before closing them. Multiple fixed sections
+ * can be open at any time.
+ *
+ * Each fixed section created in a .S file must have corresponding linkage
+ * directives including location, added to arch/powerpc/kernel/vmlinux.lds.S
+ *
+ * For each fixed section, code is generated into it in the order which it
+ * appears in the source.  Fixed section entries can be placed at a fixed
+ * location within the section using the _LOCATION postfix variants. These must
+ * be ordered according to their relative placements within the section.
+ *
+ * OPEN_FIXED_SECTION(section_name, start_address, end_address)
+ * FIXED_SECTION_ENTRY_BEGIN(section_name, label1)
+ *
+ * USE_FIXED_SECTION(section_name)
+ * label3:
+ *     li  r10,128
+ *     mr  r11,r10
+ *
+ * FIXED_SECTION_ENTRY_BEGIN_LOCATION(section_name, label2, start_address, size)
+ * FIXED_SECTION_ENTRY_END_LOCATION(section_name, label2, start_address, size)
+ * CLOSE_FIXED_SECTION(section_name)
+ *
+ * ZERO_FIXED_SECTION can be used to emit zeroed data.
+ *
+ * Troubleshooting:
+ * - If the build dies with "Error: attempt to move .org backwards" at
+ *   CLOSE_FIXED_SECTION() or elsewhere, there may be something
+ *   unexpected being added there. Remove the '. = x_len' line, rebuild, and
+ *   check what is pushing the section down.
+ * - If the build dies in linking, check arch/powerpc/tools/head_check.sh
+ *   comments.
+ * - If the kernel crashes or hangs in very early boot, it could be linker
+ *   stubs at the start of the main text.
+ */
+
+#define OPEN_FIXED_SECTION(sname, start, end)			\
+	sname##_start = (start);				\
+	sname##_end = (end);					\
+	sname##_len = (end) - (start);				\
+	define_ftsec sname;					\
+	. = 0x0;						\
+start_##sname:
+
+/*
+ * .linker_stub_catch section is used to catch linker stubs from being
+ * inserted in our .text section, above the start_text label (which breaks
+ * the ABS_ADDR calculation). See kernel/vmlinux.lds.S and tools/head_check.sh
+ * for more details. We would prefer to just keep a cacheline (0x80), but
+ * 0x100 seems to be how the linker aligns branch stub groups.
+ */
+#ifdef CONFIG_LD_HEAD_STUB_CATCH
+#define OPEN_TEXT_SECTION(start)				\
+	.section ".linker_stub_catch","ax",@progbits;		\
+linker_stub_catch:						\
+	. = 0x4;						\
+	text_start = (start) + 0x100;				\
+	.section ".text","ax",@progbits;			\
+	.balign 0x100;						\
+start_text:
+#else
+#define OPEN_TEXT_SECTION(start)				\
+	text_start = (start);					\
+	.section ".text","ax",@progbits;			\
+	. = 0x0;						\
+start_text:
+#endif
+
+#define ZERO_FIXED_SECTION(sname, start, end)			\
+	sname##_start = (start);				\
+	sname##_end = (end);					\
+	sname##_len = (end) - (start);				\
+	define_data_ftsec sname;				\
+	. = 0x0;						\
+	. = sname##_len;
+
+#define USE_FIXED_SECTION(sname)				\
+	fs_label = start_##sname;				\
+	fs_start = sname##_start;				\
+	use_ftsec sname;
+
+#define USE_TEXT_SECTION()					\
+	fs_label = start_text;					\
+	fs_start = text_start;					\
+	.text
+
+#define CLOSE_FIXED_SECTION(sname)				\
+	USE_FIXED_SECTION(sname);				\
+	. = sname##_len;					\
+end_##sname:
+
+
+#define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align)	\
+	USE_FIXED_SECTION(sname);				\
+	.balign __align;					\
+	.global name;						\
+name:
+
+#define FIXED_SECTION_ENTRY_BEGIN(sname, name)			\
+	__FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES)
+
+#define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start, size) \
+	USE_FIXED_SECTION(sname);				\
+	name##_start = (start);					\
+	.if ((start) % (size) != 0);				\
+	.error "Fixed section exception vector misalignment";	\
+	.endif;							\
+	.if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100); \
+	.error "Fixed section exception vector bad size";	\
+	.endif;							\
+	.if (start) < sname##_start;				\
+	.error "Fixed section underflow";			\
+	.abort;							\
+	.endif;							\
+	. = (start) - sname##_start;				\
+	.global name;						\
+name:
+
+#define FIXED_SECTION_ENTRY_END_LOCATION(sname, name, start, size) \
+	.if (start) + (size) > sname##_end;			\
+	.error "Fixed section overflow";			\
+	.abort;							\
+	.endif;							\
+	.if (. - name > (start) + (size) - name##_start);	\
+	.error "Fixed entry overflow";				\
+	.abort;							\
+	.endif;							\
+	. = ((start) + (size) - sname##_start);			\
+
+
+/*
+ * These macros are used to change symbols in other fixed sections to be
+ * absolute or related to our current fixed section.
+ *
+ * - DEFINE_FIXED_SYMBOL / FIXED_SYMBOL_ABS_ADDR is used to find the
+ *   absolute address of a symbol within a fixed section, from any section.
+ *
+ * - ABS_ADDR is used to find the absolute address of any symbol, from within
+ *   a fixed section.
+ */
+#define DEFINE_FIXED_SYMBOL(label)				\
+	label##_absolute = (label - fs_label + fs_start)
+
+#define FIXED_SYMBOL_ABS_ADDR(label)				\
+	(label##_absolute)
+
+#define ABS_ADDR(label) (label - fs_label + fs_start)
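+
+/*
+ * For illustration (not part of the original header): if a section was
+ * opened with OPEN_FIXED_SECTION(real_vectors, 0x100, 0x1900), a label at
+ * offset 0x20 inside it has
+ *
+ *	ABS_ADDR(label) = label - start_real_vectors + 0x100 = 0x120
+ *
+ * since USE_FIXED_SECTION() sets fs_label/fs_start to the section's start
+ * label and start address.
+ */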
+
+/*
+ * Following are the BOOK3S exception handler helper macros.
+ * Handlers come in a number of types, and each type has a number of varieties.
+ *
+ * EXC_REAL_*     - real, unrelocated exception vectors
+ * EXC_VIRT_*     - virt (AIL), unrelocated exception vectors
+ * TRAMP_REAL_*   - real, unrelocated helpers (virt can call these)
+ * TRAMP_VIRT_*   - virt, unrelocated helpers (in practice, real can use)
+ * TRAMP_KVM      - KVM handlers that get put into real, unrelocated
+ * EXC_COMMON     - virt, relocated common handlers
+ *
+ * The EXC handlers are given a name, and branch to name_common, or the
+ * appropriate KVM or masking function. Vector handler varieties are as
+ * follows:
+ *
+ * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
+ *
+ * EXC_{REAL|VIRT}  - standard exception
+ *
+ * EXC_{REAL|VIRT}_suffix
+ *     where _suffix is:
+ *   - _MASKABLE               - maskable exception
+ *   - _OOL                    - out of line with trampoline to common handler
+ *   - _HV                     - HV exception
+ *
+ * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
+ *
+ * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
+ * an OOL vector that branches to a specified handler rather than the usual
+ * trampoline that goes to common. It, and other underscore macros, should
+ * be used with care.
+ *
+ * KVM handlers come in the following varieties:
+ * TRAMP_KVM
+ * TRAMP_KVM_SKIP
+ * TRAMP_KVM_HV
+ * TRAMP_KVM_HV_SKIP
+ *
+ * COMMON handlers come in the following varieties:
+ * EXC_COMMON_BEGIN/END - used to open-code the handler
+ * EXC_COMMON
+ * EXC_COMMON_ASYNC
+ *
+ * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
+ * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
+ */
+
+#define EXC_REAL_BEGIN(name, start, size)			\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
+
+#define EXC_REAL_END(name, start, size)				\
+	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
+
+#define EXC_VIRT_BEGIN(name, start, size)			\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
+
+#define EXC_VIRT_END(name, start, size)				\
+	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
+
+#define EXC_COMMON_BEGIN(name)					\
+	USE_TEXT_SECTION();					\
+	.balign IFETCH_ALIGN_BYTES;				\
+	.global name;						\
+	_ASM_NOKPROBE_SYMBOL(name);				\
+	DEFINE_FIXED_SYMBOL(name);				\
+name:
+
+#define TRAMP_REAL_BEGIN(name)					\
+	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
+
+#define TRAMP_VIRT_BEGIN(name)					\
+	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define TRAMP_KVM_BEGIN(name)					\
+	TRAMP_VIRT_BEGIN(name)
+#else
+#define TRAMP_KVM_BEGIN(name)
+#endif
+
+#define EXC_REAL_NONE(start, size)				\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
+	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
+
+#define EXC_VIRT_NONE(start, size)				\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
+	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size);
+
+
+#define EXC_REAL(name, start, size)					\
+	EXC_REAL_BEGIN(name, start, size);				\
+	STD_EXCEPTION(start, name##_common);				\
+	EXC_REAL_END(name, start, size);
+
+#define EXC_VIRT(name, start, size, realvec)				\
+	EXC_VIRT_BEGIN(name, start, size);				\
+	STD_RELON_EXCEPTION(start, realvec, name##_common);		\
+	EXC_VIRT_END(name, start, size);
+
+#define EXC_REAL_MASKABLE(name, start, size, bitmask)			\
+	EXC_REAL_BEGIN(name, start, size);				\
+	MASKABLE_EXCEPTION(start, name##_common, bitmask);		\
+	EXC_REAL_END(name, start, size);
+
+#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask)		\
+	EXC_VIRT_BEGIN(name, start, size);				\
+	MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask);	\
+	EXC_VIRT_END(name, start, size);
+
+#define EXC_REAL_HV(name, start, size)					\
+	EXC_REAL_BEGIN(name, start, size);				\
+	STD_EXCEPTION_HV(start, start, name##_common);			\
+	EXC_REAL_END(name, start, size);
+
+#define EXC_VIRT_HV(name, start, size, realvec)				\
+	EXC_VIRT_BEGIN(name, start, size);				\
+	STD_RELON_EXCEPTION_HV(start, realvec, name##_common);		\
+	EXC_VIRT_END(name, start, size);
+
+#define __EXC_REAL_OOL(name, start, size)				\
+	EXC_REAL_BEGIN(name, start, size);				\
+	__OOL_EXCEPTION(start, label, tramp_real_##name);		\
+	EXC_REAL_END(name, start, size);
+
+#define __TRAMP_REAL_OOL(name, vec)					\
+	TRAMP_REAL_BEGIN(tramp_real_##name);				\
+	STD_EXCEPTION_OOL(vec, name##_common);
+
+#define EXC_REAL_OOL(name, start, size)					\
+	__EXC_REAL_OOL(name, start, size);				\
+	__TRAMP_REAL_OOL(name, start);
+
+#define __EXC_REAL_OOL_MASKABLE(name, start, size)			\
+	__EXC_REAL_OOL(name, start, size);
+
+#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask)			\
+	TRAMP_REAL_BEGIN(tramp_real_##name);				\
+	MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
+
+#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask)		\
+	__EXC_REAL_OOL_MASKABLE(name, start, size);			\
+	__TRAMP_REAL_OOL_MASKABLE(name, start, bitmask);
+
+#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler)		\
+	EXC_REAL_BEGIN(name, start, size);				\
+	__OOL_EXCEPTION(start, label, handler);				\
+	EXC_REAL_END(name, start, size);
+
+#define __EXC_REAL_OOL_HV(name, start, size)				\
+	__EXC_REAL_OOL(name, start, size);
+
+#define __TRAMP_REAL_OOL_HV(name, vec)					\
+	TRAMP_REAL_BEGIN(tramp_real_##name);				\
+	STD_EXCEPTION_HV_OOL(vec, name##_common);			\
+
+#define EXC_REAL_OOL_HV(name, start, size)				\
+	__EXC_REAL_OOL_HV(name, start, size);				\
+	__TRAMP_REAL_OOL_HV(name, start);
+
+#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size)			\
+	__EXC_REAL_OOL(name, start, size);
+
+#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask)		\
+	TRAMP_REAL_BEGIN(tramp_real_##name);				\
+	MASKABLE_EXCEPTION_HV_OOL(vec, name##_common, bitmask);		\
+
+#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask)		\
+	__EXC_REAL_OOL_MASKABLE_HV(name, start, size);			\
+	__TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask);
+
+#define __EXC_VIRT_OOL(name, start, size)				\
+	EXC_VIRT_BEGIN(name, start, size);				\
+	__OOL_EXCEPTION(start, label, tramp_virt_##name);		\
+	EXC_VIRT_END(name, start, size);
+
+#define __TRAMP_VIRT_OOL(name, realvec)					\
+	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
+	STD_RELON_EXCEPTION_OOL(realvec, name##_common);
+
+#define EXC_VIRT_OOL(name, start, size, realvec)			\
+	__EXC_VIRT_OOL(name, start, size);				\
+	__TRAMP_VIRT_OOL(name, realvec);
+
+#define __EXC_VIRT_OOL_MASKABLE(name, start, size)			\
+	__EXC_VIRT_OOL(name, start, size);
+
+#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)		\
+	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
+	MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
+
+#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask)	\
+	__EXC_VIRT_OOL_MASKABLE(name, start, size);			\
+	__TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask);
+
+#define __EXC_VIRT_OOL_HV(name, start, size)				\
+	__EXC_VIRT_OOL(name, start, size);
+
+#define __TRAMP_VIRT_OOL_HV(name, realvec)				\
+	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
+	STD_RELON_EXCEPTION_HV_OOL(realvec, name##_common);		\
+
+#define EXC_VIRT_OOL_HV(name, start, size, realvec)			\
+	__EXC_VIRT_OOL_HV(name, start, size);				\
+	__TRAMP_VIRT_OOL_HV(name, realvec);
+
+#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size)			\
+	__EXC_VIRT_OOL(name, start, size);
+
+#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)		\
+	TRAMP_VIRT_BEGIN(tramp_virt_##name);				\
+	MASKABLE_RELON_EXCEPTION_HV_OOL(realvec, name##_common, bitmask);\
+
+#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask)	\
+	__EXC_VIRT_OOL_MASKABLE_HV(name, start, size);			\
+	__TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask);
+
+#define TRAMP_KVM(area, n)						\
+	TRAMP_KVM_BEGIN(do_kvm_##n);					\
+	KVM_HANDLER(area, EXC_STD, n);					\
+
+#define TRAMP_KVM_SKIP(area, n)						\
+	TRAMP_KVM_BEGIN(do_kvm_##n);					\
+	KVM_HANDLER_SKIP(area, EXC_STD, n);				\
+
+/*
+ * HV variant exceptions get the 0x2 bit added to their trap number.
+ */
+#define TRAMP_KVM_HV(area, n)						\
+	TRAMP_KVM_BEGIN(do_kvm_H##n);					\
+	KVM_HANDLER(area, EXC_HV, n + 0x2);				\
+
+#define TRAMP_KVM_HV_SKIP(area, n)					\
+	TRAMP_KVM_BEGIN(do_kvm_H##n);					\
+	KVM_HANDLER_SKIP(area, EXC_HV, n + 0x2);			\
+
+#define EXC_COMMON(name, realvec, hdlr)					\
+	EXC_COMMON_BEGIN(name);						\
+	STD_EXCEPTION_COMMON(realvec, name, hdlr);			\
+
+#define EXC_COMMON_ASYNC(name, realvec, hdlr)				\
+	EXC_COMMON_BEGIN(name);						\
+	STD_EXCEPTION_COMMON_ASYNC(realvec, name, hdlr);		\
+
+#endif /* __ASSEMBLY__ */
+
+#endif	/* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/heathrow.h b/arch/powerpc/include/asm/heathrow.h
new file mode 100644
index 0000000..8bc5b16
--- /dev/null
+++ b/arch/powerpc/include/asm/heathrow.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HEATHROW_H
+#define _ASM_POWERPC_HEATHROW_H
+#ifdef __KERNEL__
+/*
+ * heathrow.h: definitions for using the "Heathrow" I/O controller chip.
+ *
+ * Grabbed from Open Firmware definitions on a PowerBook G3 Series
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+
+/* Front light color on Yikes/B&W G3. 32 bits */
+#define HEATHROW_FRONT_LIGHT		0x32 /* (set to 0 or 0xffffffff) */
+
+/* Brightness/contrast (gossamer iMac ?). 8 bits */
+#define HEATHROW_BRIGHTNESS_CNTL	0x32
+#define HEATHROW_CONTRAST_CNTL		0x33
+
+/* offset from ohare base for feature control register */
+#define HEATHROW_MBCR			0x34	/* Media bay control */
+#define HEATHROW_FCR			0x38	/* Feature control */
+#define HEATHROW_AUX_CNTL_REG		0x3c	/* Aux control */
+
+/*
+ * Bits in feature control register.
+ * Bits postfixed with a _N are in inverse logic
+ */
+#define HRW_SCC_TRANS_EN_N	0x00000001	/* Also controls modem power */
+#define HRW_BAY_POWER_N		0x00000002
+#define HRW_BAY_PCI_ENABLE	0x00000004
+#define HRW_BAY_IDE_ENABLE	0x00000008
+#define HRW_BAY_FLOPPY_ENABLE	0x00000010
+#define HRW_IDE0_ENABLE		0x00000020
+#define HRW_IDE0_RESET_N	0x00000040
+#define HRW_BAY_DEV_MASK	0x0000001c
+#define HRW_BAY_RESET_N		0x00000080
+#define HRW_IOBUS_ENABLE	0x00000100	/* Internal IDE ? */
+#define HRW_SCC_ENABLE		0x00000200
+#define HRW_MESH_ENABLE		0x00000400
+#define HRW_SWIM_ENABLE		0x00000800
+#define HRW_SOUND_POWER_N	0x00001000
+#define HRW_SOUND_CLK_ENABLE	0x00002000
+#define HRW_SCCA_IO		0x00004000
+#define HRW_SCCB_IO		0x00008000
+#define HRW_PORT_OR_DESK_VIA_N	0x00010000	/* This one is 0 on PowerBook */
+#define HRW_PWM_MON_ID_N	0x00020000	/* ??? (0) */
+#define HRW_HOOK_MB_CNT_N	0x00040000	/* ??? (0) */
+#define HRW_SWIM_CLONE_FLOPPY	0x00080000	/* ??? (0) */
+#define HRW_AUD_RUN22		0x00100000	/* ??? (1) */
+#define HRW_SCSI_LINK_MODE	0x00200000	/* Read ??? (1) */
+#define HRW_ARB_BYPASS		0x00400000	/* Disable internal PCI arbiter */
+#define HRW_IDE1_RESET_N	0x00800000	/* Media bay */
+#define HRW_SLOW_SCC_PCLK	0x01000000	/* ??? (0) */
+#define HRW_RESET_SCC		0x02000000
+#define HRW_MFDC_CELL_ENABLE	0x04000000	/* ??? (0) */
+#define HRW_USE_MFDC		0x08000000	/* ??? (0) */
+#define HRW_BMAC_IO_ENABLE	0x60000000	/* two bits, not documented in OF */
+#define HRW_BMAC_RESET		0x80000000	/* not documented in OF */
+
+/* We OR those features at boot on desktop G3s */
+#define HRW_DEFAULTS		(HRW_SCCA_IO | HRW_SCCB_IO | HRW_SCC_ENABLE)
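+
+/*
+ * Illustrative sketch (editor's addition, not in the original header): how
+ * such an OR of the defaults typically looks, assuming "base" is the
+ * ioremapped heathrow register block:
+ *
+ *	u32 fcr = in_le32(base + HEATHROW_FCR);
+ *	fcr |= HRW_DEFAULTS;
+ *	out_le32(base + HEATHROW_FCR, fcr);
+ */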
+
+/* Looks like Heathrow has some sort of GPIOs as well... */
+#define HRW_GPIO_MODEM_RESET	0x6d
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HEATHROW_H */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
new file mode 100644
index 0000000..a4b65b1
--- /dev/null
+++ b/arch/powerpc/include/asm/highmem.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes of physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+#include <asm/kmap_types.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/fixmap.h>
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * We use one full pte table with 4K pages. And with 16K/64K/256K pages the
+ * pte table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
+ * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
+ * in case of 16K/64K/256K page sizes.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER	PTE_SHIFT
+#else
+#define PKMAP_ORDER	9
+#endif
+#define LAST_PKMAP	(1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
+#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
+
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
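+
+/*
+ * Illustrative sketch (editor's addition): typical sleeping-context use of
+ * the kmap()/kunmap() pair above, for a page that may or may not be highmem:
+ *
+ *	void *vaddr = kmap(page);
+ *	memset(vaddr, 0, PAGE_SIZE);
+ *	kunmap(page);
+ *
+ * kmap_atomic() is the non-sleeping variant; its mapping is undone with
+ * kunmap_atomic() on the returned address.
+ */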
+
+
+#define flush_cache_kmaps()	flush_cache_all()
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/powerpc/include/asm/hmi.h b/arch/powerpc/include/asm/hmi.h
new file mode 100644
index 0000000..9c14f7b
--- /dev/null
+++ b/arch/powerpc/include/asm/hmi.h
@@ -0,0 +1,49 @@
+/*
+ * Hypervisor Maintenance Interrupt header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_HMI_H__
+#define __ASM_PPC64_HMI_H__
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+#define	CORE_TB_RESYNC_REQ_BIT		63
+#define MAX_SUBCORE_PER_CORE		4
+
+/*
+ * sibling_subcore_state structure is used to co-ordinate all threads
+ * during HMI to avoid TB corruption. This structure is allocated once
+ * per each core and shared by all threads on that core.
+ */
+struct sibling_subcore_state {
+	unsigned long	flags;
+	u8		in_guest[MAX_SUBCORE_PER_CORE];
+};
+
+extern void wait_for_subcore_guest_exit(void);
+extern void wait_for_tb_resync(void);
+#else
+static inline void wait_for_subcore_guest_exit(void) { }
+static inline void wait_for_tb_resync(void) { }
+#endif
+
+struct pt_regs;
+extern long hmi_handle_debugtrig(struct pt_regs *regs);
+
+#endif /* __ASM_PPC64_HMI_H__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
new file mode 100644
index 0000000..2d00cc5
--- /dev/null
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#ifdef CONFIG_HUGETLB_PAGE
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+extern struct kmem_cache *hugepte_cache;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#include <asm/book3s/64/hugetlb.h>
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64-bit book3s.
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+	BUG_ON(!hugepd_ok(hpd));
+	/*
+	 * We have only four bits to encode the MMU page size.
+	 */
+	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_page(vma, vmaddr);
+}
+
+#else
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+	BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
+#else
+	return (pte_t *)((hpd_val(hpd) &
+			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+#ifdef CONFIG_PPC_8xx
+	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+#else
+	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
+#endif
+}
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+				    unsigned pdshift)
+{
+	/*
+	 * On FSL BookE, we have multiple higher-level table entries that
+	 * point to the same hugepte.  Just use the first one since they're all
+	 * identical.  So for that case, idx=0.
+	 */
+	unsigned long idx = 0;
+
+	pte_t *dir = hugepd_page(hpd);
+#ifndef CONFIG_PPC_FSL_BOOK3E
+	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
+#endif
+
+	return dir + idx;
+}
+
+void flush_dcache_icache_hugepage(struct page *page);
+
+int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+			   unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len)
+{
+	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+		return slice_is_hugepage_only_range(mm, addr, len);
+	return 0;
+}
+
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte);
+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+#else
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_PPC64
+	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
+#else
+	return __pte(pte_update(ptep, ~0UL, 0));
+#endif
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+	pte_t pte;
+	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	flush_hugetlb_page(vma, addr);
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      pte_t pte, int dirty);
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#else /* ! CONFIG_HUGETLB_PAGE */
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+}
+
+#define hugepd_shift(x) 0
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+				    unsigned pdshift)
+{
+	return NULL;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
new file mode 100644
index 0000000..a0b17f9
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_HVCALL_H
+#define _ASM_POWERPC_HVCALL_H
+#ifdef __KERNEL__
+
+#define HVSC			.long 0x44000022
+
+#define H_SUCCESS	0
+#define H_BUSY		1	/* Hardware busy -- retry later */
+#define H_CLOSED	2	/* Resource closed */
+#define H_NOT_AVAILABLE 3
+#define H_CONSTRAINED	4	/* Resource request constrained to max allowed */
+#define H_PARTIAL       5
+#define H_IN_PROGRESS	14	/* Kind of like busy */
+#define H_PAGE_REGISTERED 15
+#define H_PARTIAL_STORE   16
+#define H_PENDING	17	/* returned from H_POLL_PENDING */
+#define H_CONTINUE	18	/* Returned from H_Join on success */
+#define H_LONG_BUSY_START_RANGE		9900  /* Start of long busy range */
+#define H_LONG_BUSY_ORDER_1_MSEC	9900  /* Long busy, hint that 1msec \
+						 is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_MSEC	9901  /* Long busy, hint that 10msec \
+						 is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_MSEC	9902  /* Long busy, hint that 100msec \
+						 is a good time to retry */
+#define H_LONG_BUSY_ORDER_1_SEC		9903  /* Long busy, hint that 1sec \
+						 is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_SEC	9904  /* Long busy, hint that 10sec \
+						 is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_SEC	9905  /* Long busy, hint that 100sec \
+						 is a good time to retry */
+#define H_LONG_BUSY_END_RANGE		9905  /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD	9999
+
+#define H_HARDWARE	-1	/* Hardware error */
+#define H_FUNCTION	-2	/* Function not supported */
+#define H_PRIVILEGE	-3	/* Caller not privileged */
+#define H_PARAMETER	-4	/* Parameter invalid, out-of-range or conflicting */
+#define H_BAD_MODE	-5	/* Illegal msr value */
+#define H_PTEG_FULL	-6	/* PTEG is full */
+#define H_NOT_FOUND	-7	/* PTE was not found */
+#define H_RESERVED_DABR	-8	/* DABR address is reserved by the hypervisor on this processor */
+#define H_NO_MEM	-9
+#define H_AUTHORITY	-10
+#define H_PERMISSION	-11
+#define H_DROPPED	-12
+#define H_SOURCE_PARM	-13
+#define H_DEST_PARM	-14
+#define H_REMOTE_PARM	-15
+#define H_RESOURCE	-16
+#define H_ADAPTER_PARM  -17
+#define H_RH_PARM       -18
+#define H_RCQ_PARM      -19
+#define H_SCQ_PARM      -20
+#define H_EQ_PARM       -21
+#define H_RT_PARM       -22
+#define H_ST_PARM       -23
+#define H_SIGT_PARM     -24
+#define H_TOKEN_PARM    -25
+#define H_MLENGTH_PARM  -27
+#define H_MEM_PARM      -28
+#define H_MEM_ACCESS_PARM -29
+#define H_ATTR_PARM     -30
+#define H_PORT_PARM     -31
+#define H_MCG_PARM      -32
+#define H_VL_PARM       -33
+#define H_TSIZE_PARM    -34
+#define H_TRACE_PARM    -35
+
+#define H_MASK_PARM     -37
+#define H_MCG_FULL      -38
+#define H_ALIAS_EXIST   -39
+#define H_P_COUNTER     -40
+#define H_TABLE_FULL    -41
+#define H_ALT_TABLE     -42
+#define H_MR_CONDITION  -43
+#define H_NOT_ENOUGH_RESOURCES -44
+#define H_R_STATE       -45
+#define H_RESCINDED     -46
+#define H_P2		-55
+#define H_P3		-56
+#define H_P4		-57
+#define H_P5		-58
+#define H_P6		-59
+#define H_P7		-60
+#define H_P8		-61
+#define H_P9		-62
+#define H_TOO_BIG	-64
+#define H_UNSUPPORTED	-67
+#define H_OVERLAP	-68
+#define H_INTERRUPT	-69
+#define H_BAD_DATA	-70
+#define H_NOT_ACTIVE	-71
+#define H_SG_LIST	-72
+#define H_OP_MODE	-73
+#define H_COP_HW	-74
+#define H_STATE		-75
+#define H_UNSUPPORTED_FLAG_START	-256
+#define H_UNSUPPORTED_FLAG_END		-511
+#define H_MULTI_THREADS_ACTIVE	-9005
+#define H_OUTSTANDING_COP_OPS	-9006
+
+
+/* Long Busy is a condition that can be returned by the firmware
+ * when a call cannot be completed now, but the identical call
+ * should be retried later.  This prevents calls blocking in the
+ * firmware for long periods of time.  Annoyingly the firmware can return
+ * a range of return codes, hinting at how long we should wait before
+ * retrying.  If you don't care for the hint, the macro below is a good
+ * way to check for the long_busy return codes.
+ */
+#define H_IS_LONG_BUSY(x)  (((x) >= H_LONG_BUSY_START_RANGE) \
+			     && ((x) <= H_LONG_BUSY_END_RANGE))
+
+/* Flags */
+#define H_LARGE_PAGE		(1UL<<(63-16))
+#define H_EXACT			(1UL<<(63-24))	/* Use exact PTE or return H_PTEG_FULL */
+#define H_R_XLATE		(1UL<<(63-25))	/* include a valid logical page num in the pte if the valid bit is set */
+#define H_READ_4		(1UL<<(63-26))	/* Return 4 PTEs */
+#define H_PAGE_STATE_CHANGE	(1UL<<(63-28))
+#define H_PAGE_UNUSED		((1UL<<(63-29)) | (1UL<<(63-30)))
+#define H_PAGE_SET_UNUSED	(H_PAGE_STATE_CHANGE | H_PAGE_UNUSED)
+#define H_PAGE_SET_LOANED	(H_PAGE_SET_UNUSED | (1UL<<(63-31)))
+#define H_PAGE_SET_ACTIVE	H_PAGE_STATE_CHANGE
+#define H_AVPN			(1UL<<(63-32))	/* An avpn is provided as a sanity test */
+#define H_ANDCOND		(1UL<<(63-33))
+#define H_LOCAL			(1UL<<(63-35))
+#define H_ICACHE_INVALIDATE	(1UL<<(63-40))	/* icbi, etc.  (ignored for IO pages) */
+#define H_ICACHE_SYNCHRONIZE	(1UL<<(63-41))	/* dcbst, icbi, etc (ignored for IO pages) */
+#define H_COALESCE_CAND	(1UL<<(63-42))	/* page is a good candidate for coalescing */
+#define H_ZERO_PAGE		(1UL<<(63-48))	/* zero the page before mapping (ignored for IO pages) */
+#define H_COPY_PAGE		(1UL<<(63-49))
+#define H_N			(1UL<<(63-61))
+#define H_PP1			(1UL<<(63-62))
+#define H_PP2			(1UL<<(63-63))
+
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT	(63-18)	/* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK		7UL
+#define H_VPA_REG_VPA		1UL	/* Register Virtual Processor Area */
+#define H_VPA_REG_DTL		2UL	/* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB		3UL	/* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA		5UL	/* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL		6UL	/* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB		7UL	/* Deregister SLB shadow buffer */
+
+/* VASI States */
+#define H_VASI_INVALID          0
+#define H_VASI_ENABLED          1
+#define H_VASI_ABORTED          2
+#define H_VASI_SUSPENDING       3
+#define H_VASI_SUSPENDED        4
+#define H_VASI_RESUMED          5
+#define H_VASI_COMPLETED        6
+
+/* Each control block has to be on a 4K boundary */
+#define H_CB_ALIGNMENT          4096
+
+/* pSeries hypervisor opcodes */
+#define H_REMOVE		0x04
+#define H_ENTER			0x08
+#define H_READ			0x0c
+#define H_CLEAR_MOD		0x10
+#define H_CLEAR_REF		0x14
+#define H_PROTECT		0x18
+#define H_GET_TCE		0x1c
+#define H_PUT_TCE		0x20
+#define H_SET_SPRG0		0x24
+#define H_SET_DABR		0x28
+#define H_PAGE_INIT		0x2c
+#define H_SET_ASR		0x30
+#define H_ASR_ON		0x34
+#define H_ASR_OFF		0x38
+#define H_LOGICAL_CI_LOAD	0x3c
+#define H_LOGICAL_CI_STORE	0x40
+#define H_LOGICAL_CACHE_LOAD	0x44
+#define H_LOGICAL_CACHE_STORE	0x48
+#define H_LOGICAL_ICBI		0x4c
+#define H_LOGICAL_DCBF		0x50
+#define H_GET_TERM_CHAR		0x54
+#define H_PUT_TERM_CHAR		0x58
+#define H_REAL_TO_LOGICAL	0x5c
+#define H_HYPERVISOR_DATA	0x60
+#define H_EOI			0x64
+#define H_CPPR			0x68
+#define H_IPI			0x6c
+#define H_IPOLL			0x70
+#define H_XIRR			0x74
+#define H_PERFMON		0x7c
+#define H_MIGRATE_DMA		0x78
+#define H_REGISTER_VPA		0xDC
+#define H_CEDE			0xE0
+#define H_CONFER		0xE4
+#define H_PROD			0xE8
+#define H_GET_PPP		0xEC
+#define H_SET_PPP		0xF0
+#define H_PURR			0xF4
+#define H_PIC			0xF8
+#define H_REG_CRQ		0xFC
+#define H_FREE_CRQ		0x100
+#define H_VIO_SIGNAL		0x104
+#define H_SEND_CRQ		0x108
+#define H_COPY_RDMA		0x110
+#define H_REGISTER_LOGICAL_LAN	0x114
+#define H_FREE_LOGICAL_LAN	0x118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
+#define H_SEND_LOGICAL_LAN	0x120
+#define H_BULK_REMOVE		0x124
+#define H_MULTICAST_CTRL	0x130
+#define H_SET_XDABR		0x134
+#define H_STUFF_TCE		0x138
+#define H_PUT_TCE_INDIRECT	0x13C
+#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
+#define H_VTERM_PARTNER_INFO	0x150
+#define H_REGISTER_VTERM	0x154
+#define H_FREE_VTERM		0x158
+#define H_RESET_EVENTS          0x15C
+#define H_ALLOC_RESOURCE        0x160
+#define H_FREE_RESOURCE         0x164
+#define H_MODIFY_QP             0x168
+#define H_QUERY_QP              0x16C
+#define H_REREGISTER_PMR        0x170
+#define H_REGISTER_SMR          0x174
+#define H_QUERY_MR              0x178
+#define H_QUERY_MW              0x17C
+#define H_QUERY_HCA             0x180
+#define H_QUERY_PORT            0x184
+#define H_MODIFY_PORT           0x188
+#define H_DEFINE_AQP1           0x18C
+#define H_GET_TRACE_BUFFER      0x190
+#define H_DEFINE_AQP0           0x194
+#define H_RESIZE_MR             0x198
+#define H_ATTACH_MCQP           0x19C
+#define H_DETACH_MCQP           0x1A0
+#define H_CREATE_RPT            0x1A4
+#define H_REMOVE_RPT            0x1A8
+#define H_REGISTER_RPAGES       0x1AC
+#define H_DISABLE_AND_GETC      0x1B0
+#define H_ERROR_DATA            0x1B4
+#define H_GET_HCA_INFO          0x1B8
+#define H_GET_PERF_COUNT        0x1BC
+#define H_MANAGE_TRACE          0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
+#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+#define H_QUERY_INT_STATE       0x1E4
+#define H_POLL_PENDING		0x1D8
+#define H_ILLAN_ATTRIBUTES	0x244
+#define H_MODIFY_HEA_QP		0x250
+#define H_QUERY_HEA_QP		0x254
+#define H_QUERY_HEA		0x258
+#define H_QUERY_HEA_PORT	0x25C
+#define H_MODIFY_HEA_PORT	0x260
+#define H_REG_BCMC		0x264
+#define H_DEREG_BCMC		0x268
+#define H_REGISTER_HEA_RPAGES	0x26C
+#define H_DISABLE_AND_GET_HEA	0x270
+#define H_GET_HEA_INFO		0x274
+#define H_ALLOC_HEA_RESOURCE	0x278
+#define H_ADD_CONN		0x284
+#define H_DEL_CONN		0x288
+#define H_JOIN			0x298
+#define H_VASI_STATE            0x2A4
+#define H_VIOCTL		0x2A8
+#define H_ENABLE_CRQ		0x2B0
+#define H_GET_EM_PARMS		0x2B8
+#define H_SET_MPP		0x2D0
+#define H_GET_MPP		0x2D4
+#define H_REG_SUB_CRQ		0x2DC
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_FREE_SUB_CRQ		0x2E0
+#define H_SEND_SUB_CRQ		0x2E4
+#define H_SEND_SUB_CRQ_INDIRECT	0x2E8
+#define H_BEST_ENERGY		0x2F4
+#define H_XIRR_X		0x2FC
+#define H_RANDOM		0x300
+#define H_COP			0x304
+#define H_GET_MPP_X		0x314
+#define H_SET_MODE		0x31C
+#define H_CLEAR_HPT		0x358
+#define H_REQUEST_VMC		0x360
+#define H_RESIZE_HPT_PREPARE	0x36C
+#define H_RESIZE_HPT_COMMIT	0x370
+#define H_REGISTER_PROC_TBL	0x37C
+#define H_SIGNAL_SYS_RESET	0x380
+#define H_INT_GET_SOURCE_INFO   0x3A8
+#define H_INT_SET_SOURCE_CONFIG 0x3AC
+#define H_INT_GET_SOURCE_CONFIG 0x3B0
+#define H_INT_GET_QUEUE_INFO    0x3B4
+#define H_INT_SET_QUEUE_CONFIG  0x3B8
+#define H_INT_GET_QUEUE_CONFIG  0x3BC
+#define H_INT_SET_OS_REPORTING_LINE 0x3C0
+#define H_INT_GET_OS_REPORTING_LINE 0x3C4
+#define H_INT_ESB               0x3C8
+#define H_INT_SYNC              0x3CC
+#define H_INT_RESET             0x3D0
+#define MAX_HCALL_OPCODE	H_INT_RESET
+
+/* H_VIOCTL functions */
+#define H_GET_VIOA_DUMP_SIZE	0x01
+#define H_GET_VIOA_DUMP		0x02
+#define H_GET_ILLAN_NUM_VLAN_IDS 0x03
+#define H_GET_ILLAN_VLAN_ID_LIST 0x04
+#define H_GET_ILLAN_SWITCH_ID	0x05
+#define H_DISABLE_MIGRATION	0x06
+#define H_ENABLE_MIGRATION	0x07
+#define H_GET_PARTNER_INFO	0x08
+#define H_GET_PARTNER_WWPN_LIST	0x09
+#define H_DISABLE_ALL_VIO_INTS	0x0A
+#define H_DISABLE_VIO_INTERRUPT	0x0B
+#define H_ENABLE_VIO_INTERRUPT	0x0C
+#define H_GET_SESSION_TOKEN	0x19
+#define H_SESSION_ERR_DETECTED	0x1A
+
+
+/* Platform specific hcalls, used by KVM */
+#define H_RTAS			0xf000
+
+/* "Platform specific hcalls", provided by PHYP */
+#define H_GET_24X7_CATALOG_PAGE	0xF078
+#define H_GET_24X7_DATA		0xF07C
+#define H_GET_PERF_COUNTER_INFO	0xF080
+
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR		1
+#define H_SET_MODE_RESOURCE_SET_DAWR		2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE	3
+#define H_SET_MODE_RESOURCE_LE			4
+
+/* Values for argument to H_SIGNAL_SYS_RESET */
+#define H_SIGNAL_SYS_RESET_ALL			-1
+#define H_SIGNAL_SYS_RESET_ALL_OTHERS		-2
+/* >= 0 values are CPU number */
+
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31	(1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED	(1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59) // IBM bit 4
+#define H_CPU_CHAR_BRANCH_HINTS_HONORED	(1ull << 58) // IBM bit 5
+#define H_CPU_CHAR_THREAD_RECONFIG_CTRL	(1ull << 57) // IBM bit 6
+#define H_CPU_CHAR_COUNT_CACHE_DISABLED	(1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST	(1ull << 54) // IBM bit 9
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE	(1ull << 58) // IBM bit 5
+
+/* Flag values used in H_REGISTER_PROC_TBL hcall */
+#define PROC_TABLE_OP_MASK	0x18
+#define PROC_TABLE_DEREG	0x10
+#define PROC_TABLE_NEW		0x18
+#define PROC_TABLE_TYPE_MASK	0x06
+#define PROC_TABLE_HPT_SLB	0x00
+#define PROC_TABLE_HPT_PT	0x02
+#define PROC_TABLE_RADIX	0x04
+#define PROC_TABLE_GTSE		0x01
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/**
+ * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+ * @opcode: The hypervisor call to make.
+ *
+ * This call supports up to 7 arguments and only returns the status of
+ * the hcall. Use this version where possible, it's slightly faster than
+ * the other plpar_hcalls.
+ */
+long plpar_hcall_norets(unsigned long opcode, ...);
+
+/**
+ * plpar_hcall: - Make a pseries hypervisor call
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+ */
+#define PLPAR_HCALL_BUFSIZE 4
+long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
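+
+/*
+ * Illustrative sketch (editor's addition): reading an HPT entry with H_READ.
+ * pte_index is a placeholder supplied by the caller.
+ *
+ *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ *	long rc;
+ *
+ *	rc = plpar_hcall(H_READ, retbuf, 0, pte_index);
+ *	if (rc == H_SUCCESS) {
+ *		... retbuf[0] and retbuf[1] hold the PTE doubleword pair ...
+ *	}
+ */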
+
+/**
+ * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used when a phyp interface needs to be called in real mode. Similar to
+ * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+ * calculate hypervisor call statistics.
+ */
+long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+/**
+ * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 9 return arguments in.
+ *
+ * This call supports up to 9 arguments and 9 return arguments. Use
+ * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+ */
+#define PLPAR_HCALL9_BUFSIZE 9
+long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+struct hvcall_mpp_data {
+	unsigned long entitled_mem;
+	unsigned long mapped_mem;
+	unsigned short group_num;
+	unsigned short pool_num;
+	unsigned char mem_weight;
+	unsigned char unallocated_mem_weight;
+	unsigned long unallocated_entitlement;  /* value in bytes */
+	unsigned long pool_size;
+	signed long loan_request;
+	unsigned long backing_mem;
+};
+
+int h_get_mpp(struct hvcall_mpp_data *);
+
+struct hvcall_mpp_x_data {
+	unsigned long coalesced_bytes;
+	unsigned long pool_coalesced_bytes;
+	unsigned long pool_purr_cycles;
+	unsigned long pool_spurr_cycles;
+	unsigned long reserved[3];
+};
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
+
+static inline unsigned int get_longbusy_msecs(int longbusy_rc)
+{
+	switch (longbusy_rc) {
+	case H_LONG_BUSY_ORDER_1_MSEC:
+		return 1;
+	case H_LONG_BUSY_ORDER_10_MSEC:
+		return 10;
+	case H_LONG_BUSY_ORDER_100_MSEC:
+		return 100;
+	case H_LONG_BUSY_ORDER_1_SEC:
+		return 1000;
+	case H_LONG_BUSY_ORDER_10_SEC:
+		return 10000;
+	case H_LONG_BUSY_ORDER_100_SEC:
+		return 100000;
+	default:
+		return 1;
+	}
+}
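+
+/*
+ * Illustrative sketch (editor's addition): the usual pairing of
+ * H_IS_LONG_BUSY() with get_longbusy_msecs() in a sleepable retry loop;
+ * "opcode" stands for whichever hcall is being made.
+ *
+ *	long rc;
+ *
+ *	do {
+ *		rc = plpar_hcall_norets(opcode);
+ *		if (H_IS_LONG_BUSY(rc))
+ *			msleep(get_longbusy_msecs(rc));
+ *	} while (H_IS_LONG_BUSY(rc));
+ */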
+
+struct h_cpu_char_result {
+	u64 character;
+	u64 behaviour;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
new file mode 100644
index 0000000..35ea69e
--- /dev/null
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -0,0 +1,41 @@
+/*
+ * hvconsole.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * LPAR console support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _PPC64_HVCONSOLE_H
+#define _PPC64_HVCONSOLE_H
+#ifdef __KERNEL__
+
+/*
+ * PSeries firmware will only send/recv up to 16 bytes of character data per
+ * hcall.
+ */
+#define MAX_VIO_PUT_CHARS	16
+#define SIZE_VIO_GET_CHARS	16
+
+/*
+ * VIO firmware always attempts to fetch SIZE_VIO_GET_CHARS chars.  The 'count'
+ * parm is included to conform to the put_chars() function pointer template.
+ */
+extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
+extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
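+
+/*
+ * Illustrative sketch (editor's addition): writing an arbitrary buffer in
+ * MAX_VIO_PUT_CHARS-sized pieces; vtermno, buf and len come from the caller.
+ *
+ *	while (len > 0) {
+ *		int n = min(len, MAX_VIO_PUT_CHARS);
+ *
+ *		n = hvc_put_chars(vtermno, buf, n);
+ *		if (n <= 0)
+ *			break;
+ *		buf += n;
+ *		len -= n;
+ *	}
+ */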
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCONSOLE_H */
diff --git a/arch/powerpc/include/asm/hvcserver.h b/arch/powerpc/include/asm/hvcserver.h
new file mode 100644
index 0000000..67d7da3
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcserver.h
@@ -0,0 +1,59 @@
+/*
+ * hvcserver.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * PPC64 virtual I/O console server support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _PPC64_HVCSERVER_H
+#define _PPC64_HVCSERVER_H
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+/* Converged Location Code length */
+#define HVCS_CLC_LENGTH	79
+
+/**
+ * hvcs_partner_info - an element in a list of partner info
+ * @node: list_head denoting this partner_info struct's position in the list of
+ *	partner info.
+ * @unit_address: The partner unit address of this entry.
+ * @partition_ID: The partner partition ID of this entry.
+ * @location_code: The converged location code of this entry + 1 char for the
+ *	null-term.
+ *
+ * This structure outlines the format in which partner info is presented to
+ * a caller of the hvcs partner info fetching functions.  These entries are
+ * strung together into a list using standard Linux kernel lists.
+ */
+struct hvcs_partner_info {
+	struct list_head node;
+	uint32_t unit_address;
+	uint32_t partition_ID;
+	char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
+};
+
+extern int hvcs_free_partner_info(struct list_head *head);
+extern int hvcs_get_partner_info(uint32_t unit_address,
+		struct list_head *head, unsigned long *pi_buff);
+extern int hvcs_register_connection(uint32_t unit_address,
+		uint32_t p_partition_ID, uint32_t p_unit_address);
+extern int hvcs_free_connection(uint32_t unit_address);
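+
+/*
+ * Illustrative sketch (editor's addition): fetching and walking the partner
+ * info list; pi_buff is assumed to be a page-sized buffer owned by the
+ * caller, and hvcs_get_partner_info() is assumed to return 0 on success.
+ *
+ *	LIST_HEAD(head);
+ *	struct hvcs_partner_info *pi;
+ *
+ *	if (!hvcs_get_partner_info(unit_address, &head, pi_buff)) {
+ *		list_for_each_entry(pi, &head, node)
+ *			pr_info("partner 0x%x at %s\n",
+ *				pi->partition_ID, pi->location_code);
+ *		hvcs_free_partner_info(&head);
+ *	}
+ */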
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCSERVER_H */
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 0000000..3fdc54d
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HVSI_H
+#define _HVSI_H
+
+#define VS_DATA_PACKET_HEADER           0xff
+#define VS_CONTROL_PACKET_HEADER        0xfe
+#define VS_QUERY_PACKET_HEADER          0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL    1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL   3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD  0x20
+
+#define HVSI_MAX_OUTGOING_DATA 12
+#define HVSI_VERSION 1
+
+struct hvsi_header {
+	uint8_t  type;
+	uint8_t  len;
+	__be16 seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+	struct hvsi_header hdr;
+	uint8_t  data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+	struct hvsi_header hdr;
+	__be16 verb;
+	/* optional depending on verb: */
+	__be32 word;
+	__be32 mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+	struct hvsi_header hdr;
+	__be16 verb;
+} __attribute__((packed));
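+
+/*
+ * Illustrative sketch (editor's addition): building a version-number query
+ * packet with the structures above; sequence numbering is up to the caller.
+ *
+ *	struct hvsi_query q;
+ *
+ *	q.hdr.type = VS_QUERY_PACKET_HEADER;
+ *	q.hdr.len = sizeof(struct hvsi_query);
+ *	q.hdr.seqno = cpu_to_be16(++seqno);
+ *	q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
+ */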
+
+struct hvsi_query_response {
+	struct hvsi_header hdr;
+	__be16 verb;
+	__be16 query_seqno;
+	union {
+		uint8_t  version;
+		__be32 mctrl_word;
+	} u;
+} __attribute__((packed));
+
+/* hvsi lib struct definitions */
+#define HVSI_INBUF_SIZE		255
+struct tty_struct;
+struct hvsi_priv {
+	unsigned int	inbuf_len;	/* data in input buffer */
+	unsigned char	inbuf[HVSI_INBUF_SIZE];
+	unsigned int	inbuf_cur;	/* Cursor in input buffer */
+	unsigned int	inbuf_pktlen;	/* packet length from cursor */
+	atomic_t	seqno;		/* packet sequence number */
+	unsigned int	opened:1;	/* driver opened */
+	unsigned int	established:1;	/* protocol established */
+	unsigned int 	is_console:1;	/* used as a kernel console device */
+	unsigned int	mctrl_update:1;	/* modem control updated */
+	unsigned short	mctrl;		/* modem control */
+	struct tty_struct *tty;		/* tty structure */
+	int (*get_chars)(uint32_t termno, char *buf, int count);
+	int (*put_chars)(uint32_t termno, const char *buf, int count);
+	uint32_t	termno;
+};
+
+/* hvsi lib functions */
+struct hvc_struct;
+extern void hvsilib_init(struct hvsi_priv *pv,
+			 int (*get_chars)(uint32_t termno, char *buf, int count),
+			 int (*put_chars)(uint32_t termno, const char *buf,
+					  int count),
+			 int termno, int is_console);
+extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
+extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
+extern void hvsilib_establish(struct hvsi_priv *pv);
+extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
+extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+
+#endif /* _HVSI_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
new file mode 100644
index 0000000..ece4dc8
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -0,0 +1,99 @@
+/*
+ * PowerPC BookIII S hardware breakpoint definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2010, IBM Corporation.
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ *
+ */
+
+#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
+#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
+
+#ifdef	__KERNEL__
+struct arch_hw_breakpoint {
+	unsigned long	address;
+	u16		type;
+	u16		len; /* length of the target data symbol */
+};
+
+/* Note: Don't change the first 6 bits below as they are in the same order
+ * as the dabr and dabrx.
+ */
+#define HW_BRK_TYPE_READ		0x01
+#define HW_BRK_TYPE_WRITE		0x02
+#define HW_BRK_TYPE_TRANSLATE		0x04
+#define HW_BRK_TYPE_USER		0x08
+#define HW_BRK_TYPE_KERNEL		0x10
+#define HW_BRK_TYPE_HYP			0x20
+#define HW_BRK_TYPE_EXTRANEOUS_IRQ	0x80
+
+/* bits that overlap with the bottom 3 bits of the dabr */
+#define HW_BRK_TYPE_RDWR	(HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
+#define HW_BRK_TYPE_DABR	(HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
+#define HW_BRK_TYPE_PRIV_ALL	(HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
+				 HW_BRK_TYPE_HYP)
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/kdebug.h>
+#include <asm/reg.h>
+#include <asm/debug.h>
+
+struct perf_event_attr;
+struct perf_event;
+struct pmu;
+struct perf_sample_data;
+struct task_struct;
+
+#define HW_BREAKPOINT_ALIGN 0x7
+
+extern int hw_breakpoint_slots(int type);
+extern int arch_bp_generic_fields(int type, int *gen_bp_type);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+				    const struct perf_event_attr *attr,
+				    struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+						unsigned long val, void *data);
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void arch_unregister_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+extern struct pmu perf_ops_bp;
+extern void ptrace_triggered(struct perf_event *bp,
+			struct perf_sample_data *data, struct pt_regs *regs);
+static inline void hw_breakpoint_disable(void)
+{
+	struct arch_hw_breakpoint brk;
+
+	brk.address = 0;
+	brk.type = 0;
+	brk.len = 0;
+	if (ppc_breakpoint_available())
+		__set_breakpoint(&brk);
+}
+extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
+int hw_breakpoint_handler(struct die_args *args);
+
+#else	/* CONFIG_HAVE_HW_BREAKPOINT */
+static inline void hw_breakpoint_disable(void) { }
+static inline void thread_change_pc(struct task_struct *tsk,
+					struct pt_regs *regs) { }
+#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+#endif	/* __KERNEL__ */
+#endif	/* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
new file mode 100644
index 0000000..32a18f2
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * These bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
+ * is set whenever we manually hard disable.
+ */
+#define PACA_IRQ_HARD_DIS	0x01
+#define PACA_IRQ_DBELL		0x02
+#define PACA_IRQ_EE		0x04
+#define PACA_IRQ_DEC		0x08 /* Or FIT */
+#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
+#define PACA_IRQ_HMI		0x20
+#define PACA_IRQ_PMI		0x40
+
+/*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
+#endif
+
+/*
+ * flags for paca->irq_soft_mask
+ */
+#define IRQS_ENABLED		0
+#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
+#define IRQS_PMI_DISABLED	2
+#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
+
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+
+extern void replay_system_reset(void);
+extern void __replay_interrupt(unsigned int vector);
+
+extern void timer_interrupt(struct pt_regs *);
+extern void timer_broadcast_interrupt(void);
+extern void performance_monitor_exception(struct pt_regs *regs);
+extern void WatchdogException(struct pt_regs *regs);
+extern void unknown_exception(struct pt_regs *regs);
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+	unsigned long flags;
+
+	asm volatile(
+		"lbz %0,%1(13)"
+		: "=r" (flags)
+		: "i" (offsetof(struct paca_struct, irq_soft_mask)));
+
+	return flags;
+}
+
+/*
+ * The "memory" clobber acts as both a compiler barrier
+ * for the critical section and as a clobber because
+ * we changed paca->irq_soft_mask
+ */
+static inline notrace void irq_soft_mask_set(unsigned long mask)
+{
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+	/*
+	 * The irq mask must always include the STD bit if any are set,
+	 * and interrupts don't get replayed until the standard
+	 * interrupt (local_irq_disable()) is unmasked.
+	 *
+	 * Other masks must only provide additional masking beyond
+	 * the standard, and they are also not replayed until the
+	 * standard interrupt becomes unmasked.
+	 *
+	 * This could be changed, but it will require partial
+	 * unmasks to be replayed, among other things. For now, take
+	 * the simple approach.
+	 */
+	WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+	asm volatile(
+		"stb %0,%1(13)"
+		:
+		: "r" (mask),
+		  "i" (offsetof(struct paca_struct, irq_soft_mask))
+		: "memory");
+}
+
+static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
+{
+	unsigned long flags;
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+	WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+	asm volatile(
+		"lbz %0,%1(13); stb %2,%1(13)"
+		: "=&r" (flags)
+		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
+		  "r" (mask)
+		: "memory");
+
+	return flags;
+}
+
+static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
+{
+	unsigned long flags, tmp;
+
+	asm volatile(
+		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
+		: "=&r" (flags), "=r" (tmp)
+		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
+		  "r" (mask)
+		: "memory");
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
+#endif
+
+	return flags;
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return irq_soft_mask_return();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	irq_soft_mask_set(IRQS_DISABLED);
+}
+
+extern void arch_local_irq_restore(unsigned long);
+
+static inline void arch_local_irq_enable(void)
+{
+	arch_local_irq_restore(IRQS_ENABLED);
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	return irq_soft_mask_set_return(IRQS_DISABLED);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & IRQS_DISABLED;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * To support disabling and enabling of irqs with PMI, a set of
+ * new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
+ * macros is added. These macros are implemented using the generic
+ * Linux local_irq_* code from include/linux/irqflags.h.
+ */
+#define raw_local_irq_pmu_save(flags)					\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
+				IRQS_PMI_DISABLED);			\
+	} while(0)
+
+#define raw_local_irq_pmu_restore(flags)				\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		arch_local_irq_restore(flags);				\
+	} while(0)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define powerpc_local_irq_pmu_save(flags)			\
+	 do {							\
+		raw_local_irq_pmu_save(flags);			\
+		trace_hardirqs_off();				\
+	} while(0)
+#define powerpc_local_irq_pmu_restore(flags)			\
+	do {							\
+		if (raw_irqs_disabled_flags(flags)) {		\
+			raw_local_irq_pmu_restore(flags);	\
+			trace_hardirqs_off();			\
+		} else {					\
+			trace_hardirqs_on();			\
+			raw_local_irq_pmu_restore(flags);	\
+		}						\
+	} while(0)
+#else
+#define powerpc_local_irq_pmu_save(flags)			\
+	do {							\
+		raw_local_irq_pmu_save(flags);			\
+	} while(0)
+#define powerpc_local_irq_pmu_restore(flags)			\
+	do {							\
+		raw_local_irq_pmu_restore(flags);		\
+	} while (0)
+#endif  /* CONFIG_TRACE_IRQFLAGS */
+
+#endif /* CONFIG_PPC_BOOK3S */
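+
+/*
+ * Illustrative sketch (editor's addition): bracketing a section that must
+ * not be interrupted even by the performance monitor interrupt:
+ *
+ *	unsigned long flags;
+ *
+ *	powerpc_local_irq_pmu_save(flags);
+ *	... touch state shared with the PMI handler ...
+ *	powerpc_local_irq_pmu_restore(flags);
+ */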
+
+#ifdef CONFIG_PPC_BOOK3E
+#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
+#else
+#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
+#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
+#endif
+
+#define hard_irq_disable()	do {					\
+	unsigned long flags;						\
+	__hard_irq_disable();						\
+	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
+	if (!arch_irqs_disabled_flags(flags)) {				\
+		asm ("stdx %%r1, 0, %1 ;"				\
+		     : "=m" (local_paca->saved_r1)			\
+		     : "b" (&local_paca->saved_r1));			\
+		trace_hardirqs_off();					\
+	}								\
+} while(0)
+
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * This is called by asynchronous interrupts to conditionally
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
+ */
+static inline void may_hard_irq_enable(void)
+{
+	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+		__hard_irq_enable();
+	}
+}
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+	return (regs->softe & IRQS_DISABLED);
+}
+
+extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+extern void irq_set_pending_from_srr1(unsigned long srr1);
+
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
+
+extern void force_external_irq_replay(void);
+
+#else /* CONFIG_PPC64 */
+
+#define SET_MSR_EE(x)	mtmsr(x)
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return mfmsr();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+#if defined(CONFIG_BOOKE)
+	asm volatile("wrtee %0" : : "r" (flags) : "memory");
+#else
+	mtmsr(flags);
+#endif
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+#ifdef CONFIG_BOOKE
+	asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EID);
+#else
+	SET_MSR_EE(flags & ~MSR_EE);
+#endif
+	return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+#ifdef CONFIG_BOOKE
+	asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EID);
+#else
+	arch_local_irq_save();
+#endif
+}
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_BOOKE
+	asm volatile("wrteei 1" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EIE);
+#else
+	unsigned long msr = mfmsr();
+	SET_MSR_EE(msr | MSR_EE);
+#endif
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags & MSR_EE) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#define hard_irq_disable()		arch_local_irq_disable()
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+	return !(regs->msr & MSR_EE);
+}
+
+static inline void may_hard_irq_enable(void) { }
+
+#endif /* CONFIG_PPC64 */
+
+#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST
+
+/*
+ * interrupt-retrigger: should we handle this via lost interrupts and IPIs
+ * or should we not care like we do now ? --BenH.
+ */
+struct irq_chip;
+
+#endif  /* __ASSEMBLY__ */
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
new file mode 100644
index 0000000..b3b0f2d
--- /dev/null
+++ b/arch/powerpc/include/asm/hydra.h
@@ -0,0 +1,101 @@
+/*
+ *  include/asm-ppc/hydra.h -- Mac I/O `Hydra' definitions
+ *
+ *  Copyright (C) 1997 Geert Uytterhoeven
+ *
+ *  This file is based on the following documentation:
+ *
+ *	Macintosh Technology in the Common Hardware Reference Platform
+ *	Apple Computer, Inc.
+ *
+ *	© Copyright 1995 Apple Computer, Inc. All rights reserved.
+ *
+ *  It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf
+ *  You can obtain paper copies of this book from computer bookstores or by
+ *  writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San
+ *  Francisco, CA 94104. Reference ISBN 1-55860-393-X.
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License.  See the file COPYING in the main directory of this archive
+ *  for more details.
+ */
+
+#ifndef _ASMPPC_HYDRA_H
+#define _ASMPPC_HYDRA_H
+
+#ifdef __KERNEL__
+
+struct Hydra {
+    /* DBDMA Controller Register Space */
+    char Pad1[0x30];
+    u_int CachePD;
+    u_int IDs;
+    u_int Feature_Control;
+    char Pad2[0x7fc4];
+    /* DBDMA Channel Register Space */
+    char SCSI_DMA[0x100];
+    char Pad3[0x300];
+    char SCCA_Tx_DMA[0x100];
+    char SCCA_Rx_DMA[0x100];
+    char SCCB_Tx_DMA[0x100];
+    char SCCB_Rx_DMA[0x100];
+    char Pad4[0x7800];
+    /* Device Register Space */
+    char SCSI[0x1000];
+    char ADB[0x1000];
+    char SCC_Legacy[0x1000];
+    char SCC[0x1000];
+    char Pad9[0x2000];
+    char VIA[0x2000];
+    char Pad10[0x28000];
+    char OpenPIC[0x40000];
+};
+
+extern volatile struct Hydra __iomem *Hydra;
+
+
+    /*
+     *  Feature Control Register
+     */
+
+#define HYDRA_FC_SCC_CELL_EN	0x00000001	/* Enable SCC Clock */
+#define HYDRA_FC_SCSI_CELL_EN	0x00000002	/* Enable SCSI Clock */
+#define HYDRA_FC_SCCA_ENABLE	0x00000004	/* Enable SCC A Lines */
+#define HYDRA_FC_SCCB_ENABLE	0x00000008	/* Enable SCC B Lines */
+#define HYDRA_FC_ARB_BYPASS	0x00000010	/* Bypass Internal Arbiter */
+#define HYDRA_FC_RESET_SCC	0x00000020	/* Reset SCC */
+#define HYDRA_FC_MPIC_ENABLE	0x00000040	/* Enable OpenPIC */
+#define HYDRA_FC_SLOW_SCC_PCLK	0x00000080	/* 1=15.6672, 0=25 MHz */
+#define HYDRA_FC_MPIC_IS_MASTER	0x00000100	/* OpenPIC Master Mode */
+
+
+    /*
+     *  OpenPIC Interrupt Sources
+     */
+
+#define HYDRA_INT_SIO		0
+#define HYDRA_INT_SCSI_DMA	1
+#define HYDRA_INT_SCCA_TX_DMA	2
+#define HYDRA_INT_SCCA_RX_DMA	3
+#define HYDRA_INT_SCCB_TX_DMA	4
+#define HYDRA_INT_SCCB_RX_DMA	5
+#define HYDRA_INT_SCSI		6
+#define HYDRA_INT_SCCA		7
+#define HYDRA_INT_SCCB		8
+#define HYDRA_INT_VIA		9
+#define HYDRA_INT_ADB		10
+#define HYDRA_INT_ADB_NMI	11
+#define HYDRA_INT_EXT1		12	/* PCI IRQW */
+#define HYDRA_INT_EXT2		13	/* PCI IRQX */
+#define HYDRA_INT_EXT3		14	/* PCI IRQY */
+#define HYDRA_INT_EXT4		15	/* PCI IRQZ */
+#define HYDRA_INT_EXT5		16	/* IDE Primary/Secondary */
+#define HYDRA_INT_EXT6		17	/* IDE Secondary */
+#define HYDRA_INT_EXT7		18	/* Power Off Request */
+#define HYDRA_INT_SPARE		19
+
+extern int hydra_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
new file mode 100644
index 0000000..d7f08ae
--- /dev/null
+++ b/arch/powerpc/include/asm/i8259.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_I8259_H
+#define _ASM_POWERPC_I8259_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+
+extern void i8259_init(struct device_node *node, unsigned long intack_addr);
+extern unsigned int i8259_irq(void);
+extern struct irq_domain *i8259_get_host(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
new file mode 100644
index 0000000..088f95b
--- /dev/null
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -0,0 +1,60 @@
+/*
+ * IBM PowerPC eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *  Joachim Fenkes <fenkes@de.ibm.com>
+ *  Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_EBUS_H
+#define _ASM_EBUS_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+extern struct bus_type ibmebus_bus_type;
+
+int ibmebus_register_driver(struct platform_driver *drv);
+void ibmebus_unregister_driver(struct platform_driver *drv);
+
+int ibmebus_request_irq(u32 ist, irq_handler_t handler,
+			unsigned long irq_flags, const char *devname,
+			void *dev_id);
+void ibmebus_free_irq(u32 ist, void *dev_id);
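+
+/*
+ * Illustrative sketch (editor's addition): requesting and releasing an
+ * interrupt for an eBus device; "ist" would come from the device tree
+ * "interrupts" property, and my_handler/dev are placeholder names.
+ *
+ *	if (ibmebus_request_irq(ist, my_handler, 0, "mydev", dev))
+ *		return -ENODEV;
+ *	...
+ *	ibmebus_free_irq(ist, dev);
+ */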
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_EBUS_H */
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
new file mode 100644
index 0000000..6a2c875
--- /dev/null
+++ b/arch/powerpc/include/asm/icswx.h
@@ -0,0 +1,188 @@
+/*
+ * ICSWX api
+ *
+ * Copyright (C) 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This provides the Initiate Coprocessor Store Word Indexed (ICSWX)
+ * instruction.  This instruction is used to communicate with PowerPC
+ * coprocessors.  This also provides definitions of the structures used
+ * to communicate with the coprocessor.
+ *
+ * The RFC02130: Coprocessor Architecture document is the reference for
+ * everything in this file unless otherwise noted.
+ */
+#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+
+#include <asm/ppc-opcode.h> /* for PPC_ICSWX */
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE		(0x3fffffffffffffff)
+#define CCB_ADDRESS		(0xfffffffffffffff8)
+#define CCB_CM			(0x0000000000000007)
+#define CCB_CM0			(0x0000000000000004)
+#define CCB_CM12		(0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS	(0x0)
+#define CCB_CM0_LAST_IN_CHAIN	(0x4)
+#define CCB_CM12_STORE		(0x0)
+#define CCB_CM12_INTERRUPT	(0x1)
+
+#define CCB_SIZE		(0x10)
+#define CCB_ALIGN		CCB_SIZE
+
+struct coprocessor_completion_block {
+	__be64 value;
+	__be64 address;
+} __packed __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V			(0x80)
+#define CSB_F			(0x04)
+#define CSB_CH			(0x03)
+#define CSB_CE_INCOMPLETE	(0x80)
+#define CSB_CE_TERMINATION	(0x40)
+#define CSB_CE_TPBC		(0x20)
+
+#define CSB_CC_SUCCESS		(0)
+#define CSB_CC_INVALID_ALIGN	(1)
+#define CSB_CC_OPERAND_OVERLAP	(2)
+#define CSB_CC_DATA_LENGTH	(3)
+#define CSB_CC_TRANSLATION	(5)
+#define CSB_CC_PROTECTION	(6)
+#define CSB_CC_RD_EXTERNAL	(7)
+#define CSB_CC_INVALID_OPERAND	(8)
+#define CSB_CC_PRIVILEGE	(9)
+#define CSB_CC_INTERNAL		(10)
+#define CSB_CC_WR_EXTERNAL	(12)
+#define CSB_CC_NOSPC		(13)
+#define CSB_CC_EXCESSIVE_DDE	(14)
+#define CSB_CC_WR_TRANSLATION	(15)
+#define CSB_CC_WR_PROTECTION	(16)
+#define CSB_CC_UNKNOWN_CODE	(17)
+#define CSB_CC_ABORT		(18)
+#define CSB_CC_EXCEED_BYTE_COUNT	(19)	/* P9 or later */
+#define CSB_CC_TRANSPORT	(20)
+#define CSB_CC_INVALID_CRB	(21)	/* P9 or later */
+#define CSB_CC_INVALID_DDE	(30)	/* P9 or later */
+#define CSB_CC_SEGMENTED_DDL	(31)
+#define CSB_CC_PROGRESS_POINT	(32)
+#define CSB_CC_DDE_OVERFLOW	(33)
+#define CSB_CC_SESSION		(34)
+#define CSB_CC_PROVISION	(36)
+#define CSB_CC_CHAIN		(37)
+#define CSB_CC_SEQUENCE		(38)
+#define CSB_CC_HW		(39)
+
+#define CSB_SIZE		(0x10)
+#define CSB_ALIGN		CSB_SIZE
+
+struct coprocessor_status_block {
+	u8 flags;
+	u8 cs;
+	u8 cc;
+	u8 ce;
+	__be32 count;
+	__be64 address;
+} __packed __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P			(0x8000)
+
+#define DDE_SIZE		(0x10)
+#define DDE_ALIGN		DDE_SIZE
+
+struct data_descriptor_entry {
+	__be16 flags;
+	u8 count;
+	u8 index;
+	__be32 length;
+	__be64 address;
+} __packed __aligned(DDE_ALIGN);
+
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE		(0x80)
+#define CRB_ALIGN		(0x100) /* Errata: requires 256 alignment */
+
+/* Coprocessor Status Block field
+ *   ADDRESS	address of CSB
+ *   C		CCB is valid
+ *   AT		0 = addrs are virtual, 1 = addrs are phys
+ *   M		enable perf monitor
+ */
+#define CRB_CSB_ADDRESS		(0xfffffffffffffff0)
+#define CRB_CSB_C		(0x0000000000000008)
+#define CRB_CSB_AT		(0x0000000000000002)
+#define CRB_CSB_M		(0x0000000000000001)
+
+struct coprocessor_request_block {
+	__be32 ccw;
+	__be32 flags;
+	__be64 csb_addr;
+
+	struct data_descriptor_entry source;
+	struct data_descriptor_entry target;
+
+	struct coprocessor_completion_block ccb;
+
+	u8 reserved[48];
+
+	struct coprocessor_status_block csb;
+} __packed __aligned(CRB_ALIGN);
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS			(0xff000000)
+#define CCW_CT			(0x00ff0000)
+#define CCW_CD			(0x0000ffff)
+#define CCW_CL			(0x0000c000)
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX)
+ * Chapter 8.2.4.1 Condition Register 0
+ */
+
+#define ICSWX_INITIATED		(0x8)
+#define ICSWX_BUSY		(0x4)
+#define ICSWX_REJECTED		(0x2)
+#define ICSWX_XERS0		(0x1)	/* undefined or set from XERSO. */
+
+static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
+{
+	__be64 ccw_reg = ccw;
+	u32 cr;
+
+	__asm__ __volatile__(
+	PPC_ICSWX(%1,0,%2) "\n"
+	"mfcr %0\n"
+	: "=r" (cr)
+	: "r" (ccw_reg), "r" (crb)
+	: "cr0", "memory");
+
+	return (int)((cr >> 28) & 0xf);
+}
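+
+/*
+ * Illustrative usage sketch (hypothetical caller code, not part of this
+ * header): build a CRB_ALIGN-aligned CRB, point csb_addr at a CSB,
+ * convert the CCW to big endian, then poll the CSB valid bit once the
+ * request has been accepted:
+ *
+ *	struct coprocessor_request_block *crb;	(assumed suitably allocated)
+ *	int rc;
+ *
+ *	crb->csb_addr = cpu_to_be64((u64)&crb->csb);
+ *	rc = icswx(cpu_to_be32(ccw), crb);
+ *	if (rc == ICSWX_INITIATED)
+ *		poll crb->csb.flags for CSB_V
+ *	else
+ *		handle ICSWX_BUSY / ICSWX_REJECTED
+ */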
+
+
+#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */
diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h
new file mode 100644
index 0000000..ce87a44
--- /dev/null
+++ b/arch/powerpc/include/asm/ide.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ *  This file contains the powerpc architecture specific IDE code.
+ */
+#ifndef _ASM_POWERPC_IDE_H
+#define _ASM_POWERPC_IDE_H
+
+#include <linux/compiler.h>
+#include <asm/io.h>
+
+#define __ide_mm_insw(p, a, c)	readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c)	readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c)	writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c)	writesl((void __iomem *)(p), (a), (c))
+
+#endif /* _ASM_POWERPC_IDE_H */
diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h
new file mode 100644
index 0000000..ead488c
--- /dev/null
+++ b/arch/powerpc/include/asm/ima.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_IMA_H
+#define _ASM_POWERPC_IMA_H
+
+struct kimage;
+
+int ima_get_kexec_buffer(void **addr, size_t *size);
+int ima_free_kexec_buffer(void);
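+
+/*
+ * Illustrative sketch (hypothetical caller code, not part of this
+ * header): the IMA measurement list is handed across kexec via the
+ * device tree, so the second kernel would consume it roughly as:
+ *
+ *	void *buf;
+ *	size_t len;
+ *
+ *	if (ima_get_kexec_buffer(&buf, &len) == 0) {
+ *		restore the measurement list from buf/len
+ *		ima_free_kexec_buffer();
+ *	}
+ */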
+
+#ifdef CONFIG_IMA
+void remove_ima_buffer(void *fdt, int chosen_node);
+#else
+static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
+#endif
+
+#ifdef CONFIG_IMA_KEXEC
+int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
+			      size_t size);
+
+int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
+#else
+static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
+				   int chosen_node)
+{
+	remove_ima_buffer(fdt, chosen_node);
+	return 0;
+}
+#endif /* CONFIG_IMA_KEXEC */
+
+#endif /* _ASM_POWERPC_IMA_H */
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
new file mode 100644
index 0000000..69f516e
--- /dev/null
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -0,0 +1,132 @@
+#ifndef __ASM_POWERPC_IMC_PMU_H
+#define __ASM_POWERPC_IMC_PMU_H
+
+/*
+ * IMC Nest Performance Monitor counter support.
+ *
+ * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
+ *           (C) 2017 Anju T Sudhakar, IBM Corporation.
+ *           (C) 2017 Hemant K Shaw, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <asm/opal.h>
+
+/*
+ * Compatibility macros for IMC devices
+ */
+#define IMC_DTB_COMPAT			"ibm,opal-in-memory-counters"
+#define IMC_DTB_UNIT_COMPAT		"ibm,imc-counters"
+
+
+/*
+ * LDBAR: Counter address and Enable/Disable macro.
+ * perf/imc-pmu.c has the LDBAR layout information.
+ */
+#define THREAD_IMC_LDBAR_MASK           0x0003ffffffffe000ULL
+#define THREAD_IMC_ENABLE               0x8000000000000000ULL
+
+/*
+ * For debugfs interface for imc-mode and imc-command
+ */
+#define IMC_CNTL_BLK_OFFSET		0x3FC00
+#define IMC_CNTL_BLK_CMD_OFFSET		8
+#define IMC_CNTL_BLK_MODE_OFFSET	32
+
+/*
+ * Structure to hold memory address information for imc units.
+ */
+struct imc_mem_info {
+	u64 *vbase;
+	u32 id;
+};
+
+/*
+ * Placeholder for nest PMU events and values.
+ */
+struct imc_events {
+	u32 value;
+	char *name;
+	char *unit;
+	char *scale;
+};
+
+/* Event attribute array index */
+#define IMC_FORMAT_ATTR		0
+#define IMC_EVENT_ATTR		1
+#define IMC_CPUMASK_ATTR	2
+#define IMC_NULL_ATTR		3
+
+/* PMU Format attribute macros */
+#define IMC_EVENT_OFFSET_MASK	0xffffffffULL
+
+/*
+ * Device tree parser code detects IMC pmu support and
+ * registers new IMC pmus. This structure will hold the
+ * pmu functions, events, counter memory information
+ * and attrs for each imc pmu and will be referenced at
+ * the time of pmu registration.
+ */
+struct imc_pmu {
+	struct pmu pmu;
+	struct imc_mem_info *mem_info;
+	struct imc_events *events;
+	/*
+	 * Attribute groups for the PMU. Per the index macros
+	 * above, slot 0 is used for the format attributes, slot 1
+	 * for the event attributes, slot 2 for the cpumask
+	 * attribute, and slot 3 is kept NULL as the terminator.
+	 */
+	const struct attribute_group *attr_groups[4];
+	u32 counter_mem_size;
+	int domain;
+	/*
+	 * Flag to indicate whether the counter memory is mmapped
+	 * or allocated by the kernel.
+	 */
+	bool imc_counter_mmaped;
+};
+
+/*
+ * Structure to hold id, lock and reference count for the IMC events which
+ * are initialized.
+ */
+struct imc_pmu_ref {
+	struct mutex lock;
+	unsigned int id;
+	int refc;
+};
+
+/*
+ * In-Memory Collection Counters type.
+ * Data comes from Device tree.
+ * Three device types are supported.
+ */
+
+enum {
+	IMC_TYPE_THREAD		= 0x1,
+	IMC_TYPE_CORE		= 0x4,
+	IMC_TYPE_CHIP           = 0x10,
+};
+
+/*
+ * Domains for IMC PMUs
+ */
+#define IMC_DOMAIN_NEST		1
+#define IMC_DOMAIN_CORE		2
+#define IMC_DOMAIN_THREAD	3
+
+extern int init_imc_pmu(struct device_node *parent,
+				struct imc_pmu *pmu_ptr, int pmu_id);
+extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
+extern void unregister_thread_imc(void);
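+
+/*
+ * Illustrative registration sketch (hypothetical platform code, not part
+ * of this header): the device tree parser fills one struct imc_pmu per
+ * "ibm,imc-counters" node and hands it to init_imc_pmu():
+ *
+ *	struct imc_pmu *pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
+ *
+ *	pmu_ptr->domain = IMC_DOMAIN_NEST;
+ *	if (init_imc_pmu(parent, pmu_ptr, pmu_id))
+ *		registration failed, free pmu_ptr
+ */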
+#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h
new file mode 100644
index 0000000..845d5b3
--- /dev/null
+++ b/arch/powerpc/include/asm/immap_cpm2.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPM2 Internal Memory Map
+ * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
+ *
+ * The Internal Memory Map for devices with CPM2 on them.  This
+ * is the superset of all CPM2 devices (8260, 8266, 8280, 8272,
+ * 8560).
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_CPM2__
+#define __IMMAP_CPM2__
+
+#include <linux/types.h>
+
+/* System configuration registers.
+*/
+typedef	struct sys_82xx_conf {
+	u32	sc_siumcr;
+	u32	sc_sypcr;
+	u8	res1[6];
+	u16	sc_swsr;
+	u8	res2[20];
+	u32	sc_bcr;
+	u8	sc_ppc_acr;
+	u8	res3[3];
+	u32	sc_ppc_alrh;
+	u32	sc_ppc_alrl;
+	u8	sc_lcl_acr;
+	u8	res4[3];
+	u32	sc_lcl_alrh;
+	u32	sc_lcl_alrl;
+	u32	sc_tescr1;
+	u32	sc_tescr2;
+	u32	sc_ltescr1;
+	u32	sc_ltescr2;
+	u32	sc_pdtea;
+	u8	sc_pdtem;
+	u8	res5[3];
+	u32	sc_ldtea;
+	u8	sc_ldtem;
+	u8	res6[163];
+} sysconf_82xx_cpm2_t;
+
+typedef	struct sys_85xx_conf {
+	u32	sc_cear;
+	u16	sc_ceer;
+	u16	sc_cemr;
+	u8	res1[70];
+	u32	sc_smaer;
+	u8	res2[4];
+	u32	sc_smevr;
+	u32	sc_smctr;
+	u32	sc_lmaer;
+	u8	res3[4];
+	u32	sc_lmevr;
+	u32	sc_lmctr;
+	u8	res4[144];
+} sysconf_85xx_cpm2_t;
+
+typedef union sys_conf {
+	sysconf_82xx_cpm2_t	siu_82xx;
+	sysconf_85xx_cpm2_t	siu_85xx;
+} sysconf_cpm2_t;
+
+
+
+/* Memory controller registers.
+*/
+typedef struct	mem_ctlr {
+	u32	memc_br0;
+	u32	memc_or0;
+	u32	memc_br1;
+	u32	memc_or1;
+	u32	memc_br2;
+	u32	memc_or2;
+	u32	memc_br3;
+	u32	memc_or3;
+	u32	memc_br4;
+	u32	memc_or4;
+	u32	memc_br5;
+	u32	memc_or5;
+	u32	memc_br6;
+	u32	memc_or6;
+	u32	memc_br7;
+	u32	memc_or7;
+	u32	memc_br8;
+	u32	memc_or8;
+	u32	memc_br9;
+	u32	memc_or9;
+	u32	memc_br10;
+	u32	memc_or10;
+	u32	memc_br11;
+	u32	memc_or11;
+	u8	res1[8];
+	u32	memc_mar;
+	u8	res2[4];
+	u32	memc_mamr;
+	u32	memc_mbmr;
+	u32	memc_mcmr;
+	u8	res3[8];
+	u16	memc_mptpr;
+	u8	res4[2];
+	u32	memc_mdr;
+	u8	res5[4];
+	u32	memc_psdmr;
+	u32	memc_lsdmr;
+	u8	memc_purt;
+	u8	res6[3];
+	u8	memc_psrt;
+	u8	res7[3];
+	u8	memc_lurt;
+	u8	res8[3];
+	u8	memc_lsrt;
+	u8	res9[3];
+	u32	memc_immr;
+	u32	memc_pcibr0;
+	u32	memc_pcibr1;
+	u8	res10[16];
+	u32	memc_pcimsk0;
+	u32	memc_pcimsk1;
+	u8	res11[52];
+} memctl_cpm2_t;
+
+/* System Integration Timers.
+*/
+typedef struct	sys_int_timers {
+	u8	res1[32];
+	u16	sit_tmcntsc;
+	u8	res2[2];
+	u32	sit_tmcnt;
+	u8	res3[4];
+	u32	sit_tmcntal;
+	u8	res4[16];
+	u16	sit_piscr;
+	u8	res5[2];
+	u32	sit_pitc;
+	u32	sit_pitr;
+	u8      res6[94];
+	u8	res7[390];
+} sit_cpm2_t;
+
+#define PISCR_PIRQ_MASK		((u16)0xff00)
+#define PISCR_PS		((u16)0x0080)
+#define PISCR_PIE		((u16)0x0004)
+#define PISCR_PTF		((u16)0x0002)
+#define PISCR_PTE		((u16)0x0001)
+
+/* PCI Controller.
+*/
+typedef struct pci_ctlr {
+	u32	pci_omisr;
+	u32	pci_omimr;
+	u8	res1[8];
+	u32	pci_ifqpr;
+	u32	pci_ofqpr;
+	u8	res2[8];
+	u32	pci_imr0;
+	u32	pci_imr1;
+	u32	pci_omr0;
+	u32	pci_omr1;
+	u32	pci_odr;
+	u8	res3[4];
+	u32	pci_idr;
+	u8	res4[20];
+	u32	pci_imisr;
+	u32	pci_imimr;
+	u8	res5[24];
+	u32	pci_ifhpr;
+	u8	res6[4];
+	u32	pci_iftpr;
+	u8	res7[4];
+	u32	pci_iphpr;
+	u8	res8[4];
+	u32	pci_iptpr;
+	u8	res9[4];
+	u32	pci_ofhpr;
+	u8	res10[4];
+	u32	pci_oftpr;
+	u8	res11[4];
+	u32	pci_ophpr;
+	u8	res12[4];
+	u32	pci_optpr;
+	u8	res13[8];
+	u32	pci_mucr;
+	u8	res14[8];
+	u32	pci_qbar;
+	u8	res15[12];
+	u32	pci_dmamr0;
+	u32	pci_dmasr0;
+	u32	pci_dmacdar0;
+	u8	res16[4];
+	u32	pci_dmasar0;
+	u8	res17[4];
+	u32	pci_dmadar0;
+	u8	res18[4];
+	u32	pci_dmabcr0;
+	u32	pci_dmandar0;
+	u8	res19[86];
+	u32	pci_dmamr1;
+	u32	pci_dmasr1;
+	u32	pci_dmacdar1;
+	u8	res20[4];
+	u32	pci_dmasar1;
+	u8	res21[4];
+	u32	pci_dmadar1;
+	u8	res22[4];
+	u32	pci_dmabcr1;
+	u32	pci_dmandar1;
+	u8	res23[88];
+	u32	pci_dmamr2;
+	u32	pci_dmasr2;
+	u32	pci_dmacdar2;
+	u8	res24[4];
+	u32	pci_dmasar2;
+	u8	res25[4];
+	u32	pci_dmadar2;
+	u8	res26[4];
+	u32	pci_dmabcr2;
+	u32	pci_dmandar2;
+	u8	res27[88];
+	u32	pci_dmamr3;
+	u32	pci_dmasr3;
+	u32	pci_dmacdar3;
+	u8	res28[4];
+	u32	pci_dmasar3;
+	u8	res29[4];
+	u32	pci_dmadar3;
+	u8	res30[4];
+	u32	pci_dmabcr3;
+	u32	pci_dmandar3;
+	u8	res31[344];
+	u32	pci_potar0;
+	u8	res32[4];
+	u32	pci_pobar0;
+	u8	res33[4];
+	u32	pci_pocmr0;
+	u8	res34[4];
+	u32	pci_potar1;
+	u8	res35[4];
+	u32	pci_pobar1;
+	u8	res36[4];
+	u32	pci_pocmr1;
+	u8	res37[4];
+	u32	pci_potar2;
+	u8	res38[4];
+	u32	pci_pobar2;
+	u8	res39[4];
+	u32	pci_pocmr2;
+	u8	res40[50];
+	u32	pci_ptcr;
+	u32	pci_gpcr;
+	u32	pci_gcr;
+	u32	pci_esr;
+	u32	pci_emr;
+	u32	pci_ecr;
+	u32	pci_eacr;
+	u8	res41[4];
+	u32	pci_edcr;
+	u8	res42[4];
+	u32	pci_eccr;
+	u8	res43[44];
+	u32	pci_pitar1;
+	u8	res44[4];
+	u32	pci_pibar1;
+	u8	res45[4];
+	u32	pci_picmr1;
+	u8	res46[4];
+	u32	pci_pitar0;
+	u8	res47[4];
+	u32	pci_pibar0;
+	u8	res48[4];
+	u32	pci_picmr0;
+	u8	res49[4];
+	u32	pci_cfg_addr;
+	u32	pci_cfg_data;
+	u32	pci_int_ack;
+	u8	res50[756];
+} pci_cpm2_t;
+
+/* Interrupt Controller.
+*/
+typedef struct interrupt_controller {
+	u16	ic_sicr;
+	u8	res1[2];
+	u32	ic_sivec;
+	u32	ic_sipnrh;
+	u32	ic_sipnrl;
+	u32	ic_siprr;
+	u32	ic_scprrh;
+	u32	ic_scprrl;
+	u32	ic_simrh;
+	u32	ic_simrl;
+	u32	ic_siexr;
+	u8	res2[88];
+} intctl_cpm2_t;
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+	u32	car_sccr;
+	u8	res1[4];
+	u32	car_scmr;
+	u8	res2[4];
+	u32	car_rsr;
+	u32	car_rmr;
+	u8	res[104];
+} car_cpm2_t;
+
+/* Input/Output Port control/status registers.
+ * Names consistent with processor manual, although they are different
+ * from the original 8xx names.......
+ */
+typedef struct io_port {
+	u32	iop_pdira;
+	u32	iop_ppara;
+	u32	iop_psora;
+	u32	iop_podra;
+	u32	iop_pdata;
+	u8	res1[12];
+	u32	iop_pdirb;
+	u32	iop_pparb;
+	u32	iop_psorb;
+	u32	iop_podrb;
+	u32	iop_pdatb;
+	u8	res2[12];
+	u32	iop_pdirc;
+	u32	iop_pparc;
+	u32	iop_psorc;
+	u32	iop_podrc;
+	u32	iop_pdatc;
+	u8	res3[12];
+	u32	iop_pdird;
+	u32	iop_ppard;
+	u32	iop_psord;
+	u32	iop_podrd;
+	u32	iop_pdatd;
+	u8	res4[12];
+} iop_cpm2_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+	u8	cpmt_tgcr1;
+	u8	res1[3];
+	u8	cpmt_tgcr2;
+	u8	res2[11];
+	u16	cpmt_tmr1;
+	u16	cpmt_tmr2;
+	u16	cpmt_trr1;
+	u16	cpmt_trr2;
+	u16	cpmt_tcr1;
+	u16	cpmt_tcr2;
+	u16	cpmt_tcn1;
+	u16	cpmt_tcn2;
+	u16	cpmt_tmr3;
+	u16	cpmt_tmr4;
+	u16	cpmt_trr3;
+	u16	cpmt_trr4;
+	u16	cpmt_tcr3;
+	u16	cpmt_tcr4;
+	u16	cpmt_tcn3;
+	u16	cpmt_tcn4;
+	u16	cpmt_ter1;
+	u16	cpmt_ter2;
+	u16	cpmt_ter3;
+	u16	cpmt_ter4;
+	u8	res3[584];
+} cpmtimer_cpm2_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+	u8	res0[24];
+	u8	sdma_sdsr;
+	u8	res1[3];
+	u8	sdma_sdmr;
+	u8	res2[3];
+	u8	sdma_idsr1;
+	u8	res3[3];
+	u8	sdma_idmr1;
+	u8	res4[3];
+	u8	sdma_idsr2;
+	u8	res5[3];
+	u8	sdma_idmr2;
+	u8	res6[3];
+	u8	sdma_idsr3;
+	u8	res7[3];
+	u8	sdma_idmr3;
+	u8	res8[3];
+	u8	sdma_idsr4;
+	u8	res9[3];
+	u8	sdma_idmr4;
+	u8	res10[707];
+} sdma_cpm2_t;
+
+/* Fast controllers
+*/
+typedef struct fcc {
+	u32	fcc_gfmr;
+	u32	fcc_fpsmr;
+	u16	fcc_ftodr;
+	u8	res1[2];
+	u16	fcc_fdsr;
+	u8	res2[2];
+	u16	fcc_fcce;
+	u8	res3[2];
+	u16	fcc_fccm;
+	u8	res4[2];
+	u8	fcc_fccs;
+	u8	res5[3];
+	u8	fcc_ftirr_phy[4];
+} fcc_t;
+
+/* Fast controllers continued
+ */
+typedef struct fcc_c {
+	u32	fcc_firper;
+	u32	fcc_firer;
+	u32	fcc_firsr_hi;
+	u32	fcc_firsr_lo;
+	u8	fcc_gfemr;
+	u8	res1[15];
+} fcc_c_t;
+
+/* TC Layer
+ */
+typedef struct tclayer {
+	u16	tc_tcmode;
+	u16	tc_cdsmr;
+	u16	tc_tcer;
+	u16	tc_rcc;
+	u16	tc_tcmr;
+	u16	tc_fcc;
+	u16	tc_ccc;
+	u16	tc_icc;
+	u16	tc_tcc;
+	u16	tc_ecc;
+	u8	res1[12];
+} tclayer_t;
+
+
+/* I2C
+*/
+typedef struct i2c {
+	u8	i2c_i2mod;
+	u8	res1[3];
+	u8	i2c_i2add;
+	u8	res2[3];
+	u8	i2c_i2brg;
+	u8	res3[3];
+	u8	i2c_i2com;
+	u8	res4[3];
+	u8	i2c_i2cer;
+	u8	res5[3];
+	u8	i2c_i2cmr;
+	u8	res6[331];
+} i2c_cpm2_t;
+
+typedef struct scc {		/* Serial communication channels */
+	u32	scc_gsmrl;
+	u32	scc_gsmrh;
+	u16	scc_psmr;
+	u8	res1[2];
+	u16	scc_todr;
+	u16	scc_dsr;
+	u16	scc_scce;
+	u8	res2[2];
+	u16	scc_sccm;
+	u8	res3;
+	u8	scc_sccs;
+	u8	res4[8];
+} scc_t;
+
+typedef struct smc {		/* Serial management channels */
+	u8	res1[2];
+	u16	smc_smcmr;
+	u8	res2[2];
+	u8	smc_smce;
+	u8	res3[3];
+	u8	smc_smcm;
+	u8	res4[5];
+} smc_t;
+
+/* Serial Peripheral Interface.
+*/
+typedef struct spi_ctrl {
+	u16	spi_spmode;
+	u8	res1[4];
+	u8	spi_spie;
+	u8	res2[3];
+	u8	spi_spim;
+	u8	res3[2];
+	u8	spi_spcom;
+	u8	res4[82];
+} spictl_cpm2_t;
+
+/* CPM Mux.
+*/
+typedef struct cpmux {
+	u8	cmx_si1cr;
+	u8	res1;
+	u8	cmx_si2cr;
+	u8	res2;
+	u32	cmx_fcr;
+	u32	cmx_scr;
+	u8	cmx_smr;
+	u8	res3;
+	u16	cmx_uar;
+	u8	res4[16];
+} cpmux_t;
+
+/* SIRAM control
+*/
+typedef struct siram {
+	u16	si_amr;
+	u16	si_bmr;
+	u16	si_cmr;
+	u16	si_dmr;
+	u8	si_gmr;
+	u8	res1;
+	u8	si_cmdr;
+	u8	res2;
+	u8	si_str;
+	u8	res3;
+	u16	si_rsr;
+} siramctl_t;
+
+typedef struct mcc {
+	u16	mcc_mcce;
+	u8	res1[2];
+	u16	mcc_mccm;
+	u8	res2[2];
+	u8	mcc_mccf;
+	u8	res3[7];
+} mcc_t;
+
+typedef struct comm_proc {
+	u32	cp_cpcr;
+	u32	cp_rccr;
+	u8	res1[14];
+	u16	cp_rter;
+	u8	res2[2];
+	u16	cp_rtmr;
+	u16	cp_rtscr;
+	u8	res3[2];
+	u32	cp_rtsr;
+	u8	res4[12];
+} cpm_cpm2_t;
+
+/* USB Controller.
+*/
+typedef struct cpm_usb_ctlr {
+	u8	usb_usmod;
+	u8	usb_usadr;
+	u8	usb_uscom;
+	u8	res1[1];
+	__be16  usb_usep[4];
+	u8	res2[4];
+	__be16  usb_usber;
+	u8	res3[2];
+	__be16  usb_usbmr;
+	u8	usb_usbs;
+	u8	res4[7];
+} usb_cpm2_t;
+
+/* ...and the whole thing wrapped up....
+*/
+
+typedef struct immap {
+	/* Some references are into the unique and known dpram spaces,
+	 * others are from the generic base.
+	 */
+#define im_dprambase	im_dpram1
+	u8		im_dpram1[16*1024];
+	u8		res1[16*1024];
+	u8		im_dpram2[4*1024];
+	u8		res2[8*1024];
+	u8		im_dpram3[4*1024];
+	u8		res3[16*1024];
+
+	sysconf_cpm2_t	im_siu_conf;	/* SIU Configuration */
+	memctl_cpm2_t	im_memctl;	/* Memory Controller */
+	sit_cpm2_t	im_sit;		/* System Integration Timers */
+	pci_cpm2_t	im_pci;		/* PCI Controller */
+	intctl_cpm2_t	im_intctl;	/* Interrupt Controller */
+	car_cpm2_t	im_clkrst;	/* Clocks and reset */
+	iop_cpm2_t	im_ioport;	/* IO Port control/status */
+	cpmtimer_cpm2_t	im_cpmtimer;	/* CPM timers */
+	sdma_cpm2_t	im_sdma;	/* SDMA control/status */
+
+	fcc_t		im_fcc[3];	/* Three FCCs */
+	u8		res4z[32];
+	fcc_c_t		im_fcc_c[3];	/* Continued FCCs */
+
+	u8		res4[32];
+
+	tclayer_t	im_tclayer[8];	/* Eight TCLayers */
+	u16		tc_tcgsr;
+	u16		tc_tcger;
+
+	/* First set of baud rate generators.
+	*/
+	u8		res[236];
+	u32		im_brgc5;
+	u32		im_brgc6;
+	u32		im_brgc7;
+	u32		im_brgc8;
+
+	u8		res5[608];
+
+	i2c_cpm2_t	im_i2c;		/* I2C control/status */
+	cpm_cpm2_t	im_cpm;		/* Communication processor */
+
+	/* Second set of baud rate generators.
+	*/
+	u32		im_brgc1;
+	u32		im_brgc2;
+	u32		im_brgc3;
+	u32		im_brgc4;
+
+	scc_t		im_scc[4];	/* Four SCCs */
+	smc_t		im_smc[2];	/* Couple of SMCs */
+	spictl_cpm2_t	im_spi;		/* A SPI */
+	cpmux_t		im_cpmux;	/* CPM clock route mux */
+	siramctl_t	im_siramctl1;	/* First SI RAM Control */
+	mcc_t		im_mcc1;	/* First MCC */
+	siramctl_t	im_siramctl2;	/* Second SI RAM Control */
+	mcc_t		im_mcc2;	/* Second MCC */
+	usb_cpm2_t	im_usb;		/* USB Controller */
+
+	u8		res6[1153];
+
+	u16		im_si1txram[256];
+	u8		res7[512];
+	u16		im_si1rxram[256];
+	u8		res8[512];
+	u16		im_si2txram[256];
+	u8		res9[512];
+	u16		im_si2rxram[256];
+	u8		res10[512];
+	u8		res11[4096];
+} cpm2_map_t;
+
+extern cpm2_map_t __iomem *cpm2_immr;
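+
+/*
+ * Illustrative access sketch (hypothetical driver code, not part of this
+ * header): registers are reached through the cpm2_immr mapping with the
+ * big-endian MMIO accessors from asm/io.h, e.g.
+ *
+ *	u32 siumcr = in_be32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr);
+ *	setbits32(&cpm2_immr->im_ioport.iop_pdata, 0x80000000);
+ */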
+
+#endif /* __IMMAP_CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h
new file mode 100644
index 0000000..faf8617
--- /dev/null
+++ b/arch/powerpc/include/asm/io-defs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* This file is meant to be included multiple times by other headers */
+/* last 2 arguments are used by platforms/cell/io-workarounds.[ch] */
+
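+/*
+ * Illustrative expansion sketch (not part of this file): a consumer
+ * defines the two DEF_PCI_AC_* macros before including this header, as
+ * asm/io.h does to build the struct ppc_pci_io hook table:
+ *
+ *	#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)  ret (*name) at;
+ *	#define DEF_PCI_AC_NORET(name, at, al, space, aa)     void (*name) at;
+ *	#include <asm/io-defs.h>
+ *	#undef DEF_PCI_AC_RET
+ *	#undef DEF_PCI_AC_NORET
+ */
+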
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+
+#ifdef __powerpc64__
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+#endif /* __powerpc64__ */
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port)
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c),
+		 (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c),
+		 (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c),
+		 (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c),
+		 (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c),
+		 (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c),
+		 (a, b, c), mem, a)
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c),
+		 (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c),
+		 (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c),
+		 (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c),
+		 (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c),
+		 (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c),
+		 (p, b, c), pio, p)
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),
+		 (a, c, n), mem, a)
+DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n),
+		 (d, s, n), mem, s)
+DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n),
+		 (d, s, n), mem, d)
diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
new file mode 100644
index 0000000..f96dd09
--- /dev/null
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -0,0 +1,48 @@
+/*
+ * Support PCI IO workaround
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _IO_WORKAROUNDS_H
+#define _IO_WORKAROUNDS_H
+
+#include <linux/io.h>
+#include <asm/pci-bridge.h>
+
+/* Bus info */
+struct iowa_bus {
+	struct pci_controller *phb;
+	struct ppc_pci_io *ops;
+	void   *private;
+};
+
+void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
+		       int (*)(struct iowa_bus *, void *), void *);
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
+struct iowa_bus *iowa_pio_find_bus(unsigned long);
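+
+/*
+ * Illustrative registration sketch (hypothetical platform code, not part
+ * of this header): a platform wanting to intercept accesses on one PHB
+ * registers its hooks at boot, e.g.
+ *
+ *	static struct ppc_pci_io my_io_ops = { ... };
+ *
+ *	static int my_iowa_init(struct iowa_bus *bus, void *data)
+ *	{
+ *		map workaround registers, stash them in bus->private
+ *		return 0;
+ *	}
+ *
+ *	iowa_register_bus(hose, &my_io_ops, my_iowa_init, NULL);
+ */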
+
+extern struct ppc_pci_io spiderpci_ops;
+extern int spiderpci_iowa_init(struct iowa_bus *, void *);
+
+#define SPIDER_PCI_REG_BASE		0xd000
+#define SPIDER_PCI_REG_SIZE		0x1000
+#define SPIDER_PCI_VCI_CNTL_STAT	0x0110
+#define SPIDER_PCI_DUMMY_READ		0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE	0x0814
+
+#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
new file mode 100644
index 0000000..b855f56
--- /dev/null
+++ b/arch/powerpc/include/asm/io.h
@@ -0,0 +1,904 @@
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
+#ifdef __KERNEL__
+
+#define ARCH_HAS_IOREMAP_WC
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Check for existence of legacy devices */
+extern int check_legacy_ioport(unsigned long base_port);
+#define I8042_DATA_REG	0x60
+#define FDC_BASE	0x3f0
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
+extern struct pci_dev *isa_bridge_pcidev;
+/*
+ * has legacy ISA devices ?
+ */
+#define arch_has_dev_port()	(isa_bridge_pcidev != NULL || isa_io_special)
+#endif
+
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/byteorder.h>
+#include <asm/synch.h>
+#include <asm/delay.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+#define SIO_CONFIG_RA	0x398
+#define SIO_CONFIG_RD	0x399
+
+#define SLOW_DOWN_IO
+
+/* 32 bits uses slightly different variables for the various IO
+ * bases. Most of this file only uses _IO_BASE, though, which we
+ * define properly based on the platform.
+ */
+#ifndef CONFIG_PCI
+#define _IO_BASE	0
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET 0
+#elif defined(CONFIG_PPC32)
+#define _IO_BASE	isa_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	pci_dram_offset
+#else
+#define _IO_BASE	pci_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	0
+#endif
+
+extern unsigned long isa_io_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+extern resource_size_t isa_mem_base;
+
+/* Boolean set by platform if PIO accesses are supported while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
+#endif
+
+/*
+ *
+ * Low level MMIO accessors
+ *
+ * This provides the non-bus specific accessors to MMIO. Those are PowerPC
+ * specific and thus shouldn't be used in generic code. The accessors
+ * provided here are:
+ *
+ *	in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
+ *	out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
+ *	_insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ *
+ * Those operate directly on a kernel virtual address. Note that the prototype
+ * for the out_* accessors has the arguments in opposite order from the usual
+ * linux PCI accessors. Unlike those, they take the address first and the value
+ * next.
+ *
+ * Note: I might drop the _ns suffix on the stream operations soon, as it is
+ * simply normal for stream operations to not swap in the first place.
+ *
+ */
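+
+/*
+ * Illustrative example (hypothetical driver code, with made-up names):
+ *
+ *	u32 v = in_be32(regs + CTRL_REG);
+ *	out_be32(regs + CTRL_REG, v | ENABLE_BIT);
+ *
+ * Note the contrast with writel(v | ENABLE_BIT, regs + CTRL_REG), which
+ * takes its arguments in the opposite order.
+ */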
+
+#ifdef CONFIG_PPC64
+#define IO_SET_SYNC_FLAG()	do { local_paca->io_sync = 1; } while(0)
+#else
+#define IO_SET_SYNC_FLAG()
+#endif
+
+/* gcc 4.0 and older doesn't have 'Z' constraint */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
+#define DEF_MMIO_IN_X(name, size, insn)				\
+static inline u##size name(const volatile u##size __iomem *addr)	\
+{									\
+	u##size ret;							\
+	__asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync"	\
+		: "=r" (ret) : "r" (addr), "m" (*addr) : "memory");	\
+	return ret;							\
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn)				\
+static inline void name(volatile u##size __iomem *addr, u##size val)	\
+{									\
+	__asm__ __volatile__("sync;"#insn" %1,0,%2"			\
+		: "=m" (*addr) : "r" (val), "r" (addr) : "memory");	\
+	IO_SET_SYNC_FLAG();						\
+}
+#else /* newer gcc */
+#define DEF_MMIO_IN_X(name, size, insn)				\
+static inline u##size name(const volatile u##size __iomem *addr)	\
+{									\
+	u##size ret;							\
+	__asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync"	\
+		: "=r" (ret) : "Z" (*addr) : "memory");			\
+	return ret;							\
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn)				\
+static inline void name(volatile u##size __iomem *addr, u##size val)	\
+{									\
+	__asm__ __volatile__("sync;"#insn" %1,%y0"			\
+		: "=Z" (*addr) : "r" (val) : "memory");			\
+	IO_SET_SYNC_FLAG();						\
+}
+#endif
+
+#define DEF_MMIO_IN_D(name, size, insn)				\
+static inline u##size name(const volatile u##size __iomem *addr)	\
+{									\
+	u##size ret;							\
+	__asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
+		: "=r" (ret) : "m" (*addr) : "memory");			\
+	return ret;							\
+}
+
+#define DEF_MMIO_OUT_D(name, size, insn)				\
+static inline void name(volatile u##size __iomem *addr, u##size val)	\
+{									\
+	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
+		: "=m" (*addr) : "r" (val) : "memory");			\
+	IO_SET_SYNC_FLAG();						\
+}
+
+DEF_MMIO_IN_D(in_8,     8, lbz);
+DEF_MMIO_OUT_D(out_8,   8, stb);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
+
+#ifdef __powerpc64__
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static inline u64 in_le64(const volatile u64 __iomem *addr)
+{
+	return swab64(in_be64(addr));
+}
+
+static inline void out_le64(volatile u64 __iomem *addr, u64 val)
+{
+	out_be64(addr, swab64(val));
+}
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+	return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+	out_le64(addr, swab64(val));
+}
+
+#endif
+#endif /* __powerpc64__ */
+
+/*
+ * Low level IO stream instructions are defined out of line for now
+ */
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
+
+/* The _ns naming is historical and will be removed. For now, just #define
+ * the non _ns equivalent names
+ */
+#define _insw	_insw_ns
+#define _insl	_insl_ns
+#define _outsw	_outsw_ns
+#define _outsl	_outsl_ns
+
+
+/*
+ * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
+ */
+
+extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+			   unsigned long n);
+extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+			 unsigned long n);
+
+/*
+ *
+ * PCI and standard ISA accessors
+ *
+ * Those are globally defined linux accessors for devices on PCI or ISA
+ * busses. They follow the Linux defined semantics. The current implementation
+ * for PowerPC is as close as possible to the x86 version of these, and thus
+ * provides fairly heavy weight barriers for the non-raw versions
+ *
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
+ */
+
+/*
+ * Include the EEH definitions when EEH is enabled only so they don't get
+ * in the way when building for 32 bits
+ */
+#ifdef CONFIG_EEH
+#include <asm/eeh.h>
+#endif
+
+/* Shortcut to the MMIO argument pointer */
+#define PCI_IO_ADDR	volatile void __iomem *
+
+/* Indirect IO address tokens:
+ *
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
+ *
+ * To help platforms who may need to differentiate MMIO addresses in
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+ * The highest addresses in the kernel virtual space are:
+ *
+ *  d0003fffffffffff	# with Hash MMU
+ *  c00fffffffffffff	# with Radix MMU
+ *
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happens when
+ * CONFIG_PPC_INDIRECT_MMIO is set, so be careful when you use that
+ * mechanism.
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
+ */
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define PCI_IO_IND_TOKEN_SHIFT	52
+#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_FIX_ADDR(addr)						\
+	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+#define PCI_GET_ADDR_TOKEN(addr)					\
+	(((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> 		\
+		PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_SET_ADDR_TOKEN(addr, token) 				\
+do {									\
+	unsigned long __a = (unsigned long)(addr);			\
+	__a &= ~PCI_IO_IND_TOKEN_MASK;					\
+	__a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT;	\
+	(addr) = (void __iomem *)__a;					\
+} while(0)
+#else
+#define PCI_FIX_ADDR(addr) (addr)
+#endif
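+
+/*
+ * Illustrative token sketch (hypothetical platform code, only meaningful
+ * when CONFIG_PPC_INDIRECT_MMIO is set): a platform tags a mapping so
+ * its hooks can recognize it later:
+ *
+ *	void __iomem *p = ioremap(paddr, size);
+ *	PCI_SET_ADDR_TOKEN(p, my_token);
+ *	...
+ *	if (PCI_GET_ADDR_TOKEN(addr) == my_token)
+ *		apply the workaround, then access via PCI_FIX_ADDR(addr)
+ *
+ * where my_token is a made-up per-bus value.
+ */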
+
+
+/*
+ * Non ordered and non-swapping "raw" accessors
+ */
+
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
+}
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
+}
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
+}
+static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+{
+	*(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
+}
+static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
+{
+	*(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
+}
+static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
+{
+	*(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
+}
+
+#ifdef __powerpc64__
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
+}
+static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+{
+	*(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
+}
+
+static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
+{
+	__raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
+}
+
+/*
+ * Real mode versions of the above. Those instructions are only supposed
+ * to be used in hypervisor real mode as per the architecture spec.
+ */
+static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
+{
+	__asm__ __volatile__("stbcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
+{
+	__asm__ __volatile__("sthcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
+{
+	__asm__ __volatile__("stwcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+	__asm__ __volatile__("stdcix %0,0,%1"
+		: : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
+{
+	__raw_rm_writeq((__force u64)cpu_to_be64(val), paddr);
+}
+
+static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
+{
+	u8 ret;
+	__asm__ __volatile__("lbzcix %0,0, %1"
+			     : "=r" (ret) : "r" (paddr) : "memory");
+	return ret;
+}
+
+static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
+{
+	u16 ret;
+	__asm__ __volatile__("lhzcix %0,0, %1"
+			     : "=r" (ret) : "r" (paddr) : "memory");
+	return ret;
+}
+
+static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
+{
+	u32 ret;
+	__asm__ __volatile__("lwzcix %0,0, %1"
+			     : "=r" (ret) : "r" (paddr) : "memory");
+	return ret;
+}
+
+static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
+{
+	u64 ret;
+	__asm__ __volatile__("ldcix %0,0, %1"
+			     : "=r" (ret) : "r" (paddr) : "memory");
+	return ret;
+}
+#endif /* __powerpc64__ */
+
+/*
+ *
+ * PCI PIO and MMIO accessors.
+ *
+ *
+ * On 32 bits, PIO operations have a recovery mechanism in case they trigger
+ * machine checks (which they occasionally do when probing non-existent
+ * IO ports on some platforms, like PowerMac and 8xx).
+ * I always found it to be of dubious reliability and I am tempted to get
+ * rid of it one of these days. So if you think it's important to keep it,
+ * please voice up asap. We never had it for 64 bits and I do not intend
+ * to port it over
+ */
+
+#ifdef CONFIG_PPC32
+
+#define __do_in_asm(name, op)				\
+static inline unsigned int name(unsigned int port)	\
+{							\
+	unsigned int x;					\
+	__asm__ __volatile__(				\
+		"sync\n"				\
+		"0:"	op "	%0,0,%1\n"		\
+		"1:	twi	0,%0,0\n"		\
+		"2:	isync\n"			\
+		"3:	nop\n"				\
+		"4:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"5:	li	%0,-1\n"		\
+		"	b	4b\n"			\
+		".previous\n"				\
+		EX_TABLE(0b, 5b)			\
+		EX_TABLE(1b, 5b)			\
+		EX_TABLE(2b, 5b)			\
+		EX_TABLE(3b, 5b)			\
+		: "=&r" (x)				\
+		: "r" (port + _IO_BASE)			\
+		: "memory");  				\
+	return x;					\
+}
+
+#define __do_out_asm(name, op)				\
+static inline void name(unsigned int val, unsigned int port) \
+{							\
+	__asm__ __volatile__(				\
+		"sync\n"				\
+		"0:" op " %0,0,%1\n"			\
+		"1:	sync\n"				\
+		"2:\n"					\
+		EX_TABLE(0b, 2b)			\
+		EX_TABLE(1b, 2b)			\
+		: : "r" (val), "r" (port + _IO_BASE)	\
+		: "memory");   	   	   		\
+}
+
+__do_in_asm(_rec_inb, "lbzx")
+__do_in_asm(_rec_inw, "lhbrx")
+__do_in_asm(_rec_inl, "lwbrx")
+__do_out_asm(_rec_outb, "stbx")
+__do_out_asm(_rec_outw, "sthbrx")
+__do_out_asm(_rec_outl, "stwbrx")
+
+#endif /* CONFIG_PPC32 */
+
+/* The "__do_*" operations below provide the actual "base" implementation
+ * for each of the defined accessors. Some of them use the out_* functions
+ * directly, some of them still use EEH, though we might change that in the
+ * future. Those macros below provide the necessary argument swapping and
+ * handling of the IO base for PIO.
+ *
+ * They are themselves used by the macros that define the actual accessors
+ * and can be used by the hooks if any.
+ *
+ * Note that PIO operations are always defined in terms of their corresponding
+ * MMIO operations. That allows platforms like iSeries who want to modify the
+ * behaviour of both to only hook on the MMIO version and get both. It's also
+ * possible to hook directly at the toplevel PIO operation if they have to
+ * be handled differently
+ */
+#define __do_writeb(val, addr)	out_8(PCI_FIX_ADDR(addr), val)
+#define __do_writew(val, addr)	out_le16(PCI_FIX_ADDR(addr), val)
+#define __do_writel(val, addr)	out_le32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq(val, addr)	out_le64(PCI_FIX_ADDR(addr), val)
+#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
+#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
+
+#ifdef CONFIG_EEH
+#define __do_readb(addr)	eeh_readb(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)	eeh_readw(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)	eeh_readl(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)	eeh_readq(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)	eeh_readw_be(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)	eeh_readl_be(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)	eeh_readq_be(PCI_FIX_ADDR(addr))
+#else /* CONFIG_EEH */
+#define __do_readb(addr)	in_8(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)	in_le16(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)	in_le32(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)	in_le64(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)	in_be16(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)	in_be32(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)	in_be64(PCI_FIX_ADDR(addr))
+#endif /* !defined(CONFIG_EEH) */
+
+#ifdef CONFIG_PPC32
+#define __do_outb(val, port)	_rec_outb(val, port)
+#define __do_outw(val, port)	_rec_outw(val, port)
+#define __do_outl(val, port)	_rec_outl(val, port)
+#define __do_inb(port)		_rec_inb(port)
+#define __do_inw(port)		_rec_inw(port)
+#define __do_inl(port)		_rec_inl(port)
+#else /* CONFIG_PPC32 */
+#define __do_outb(val, port)	writeb(val, (PCI_IO_ADDR)_IO_BASE+port)
+#define __do_outw(val, port)	writew(val, (PCI_IO_ADDR)_IO_BASE+port)
+#define __do_outl(val, port)	writel(val, (PCI_IO_ADDR)_IO_BASE+port)
+#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port)
+#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port)
+#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port)
+#endif /* !CONFIG_PPC32 */
+
+#ifdef CONFIG_EEH
+#define __do_readsb(a, b, n)	eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)	eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)	eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#else /* CONFIG_EEH */
+#define __do_readsb(a, b, n)	_insb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)	_insw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)	_insl(PCI_FIX_ADDR(a), (b), (n))
+#endif /* !CONFIG_EEH */
+#define __do_writesb(a, b, n)	_outsb(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesw(a, b, n)	_outsw(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesl(a, b, n)	_outsl(PCI_FIX_ADDR(a),(b),(n))
+
+#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+
+#define __do_memset_io(addr, c, n)	\
+				_memset_io(PCI_FIX_ADDR(addr), c, n)
+#define __do_memcpy_toio(dst, src, n)	\
+				_memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+
+#ifdef CONFIG_EEH
+#define __do_memcpy_fromio(dst, src, n)	\
+				eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+#else /* CONFIG_EEH */
+#define __do_memcpy_fromio(dst, src, n)	\
+				_memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+#endif /* !CONFIG_EEH */
+
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#define DEF_PCI_HOOK_pio(x)	x
+#else
+#define DEF_PCI_HOOK_pio(x)	NULL
+#endif
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define DEF_PCI_HOOK_mem(x)	x
+#else
+#define DEF_PCI_HOOK_mem(x)	NULL
+#endif
+
+/* Structure containing all the hooks */
+extern struct ppc_pci_io {
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al, space, aa)	void (*name) at;
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+} ppc_pci_io;
+
+/* The inline wrappers */
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)		\
+static inline ret name at					\
+{								\
+	if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL)	\
+		return ppc_pci_io.name al;			\
+	return __do_##name al;					\
+}
+
+#define DEF_PCI_AC_NORET(name, at, al, space, aa)		\
+static inline void name at					\
+{								\
+	if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL)		\
+		ppc_pci_io.name al;				\
+	else							\
+		__do_##name al;					\
+}
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
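+
+/*
+ * For reference, the expansion above produces wrappers of this shape
+ * (shown for readb only, reconstructed by hand):
+ *
+ *	static inline u8 readb(const PCI_IO_ADDR addr)
+ *	{
+ *		if (DEF_PCI_HOOK_mem(ppc_pci_io.readb) != NULL)
+ *			return ppc_pci_io.readb(addr);
+ *		return __do_readb(addr);
+ *	}
+ */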
+
+/* Some drivers check for the presence of readq & writeq with
+ * a #ifdef, so we make them happy here.
+ */
+#ifdef __powerpc64__
+#define readq	readq
+#define writeq	writeq
+#endif
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+/*
+ * We don't do relaxed operations yet, at least not with these semantics
+ */
+#define readb_relaxed(addr)	readb(addr)
+#define readw_relaxed(addr)	readw(addr)
+#define readl_relaxed(addr)	readl(addr)
+#define readq_relaxed(addr)	readq(addr)
+#define writeb_relaxed(v, addr)	writeb(v, addr)
+#define writew_relaxed(v, addr)	writew(v, addr)
+#define writel_relaxed(v, addr)	writel(v, addr)
+#define writeq_relaxed(v, addr)	writeq(v, addr)
+
+#include <asm-generic/iomap.h>
+
+#ifdef CONFIG_PPC32
+#define mmiowb()
+#else
+/*
+ * Enforce synchronisation of stores vs. spin_unlock
+ * (this does it explicitly, though our implementation of spin_unlock
+ * does it implicitly too)
+ */
+static inline void mmiowb(void)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
+	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
+	: "memory");
+}
+#endif /* !CONFIG_PPC32 */
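+
+/*
+ * Illustrative usage sketch (hypothetical driver code): mmiowb() orders
+ * a preceding MMIO store against a following spin_unlock():
+ *
+ *	spin_lock(&dev_lock);
+ *	writel(val, regs + DOORBELL);	(made-up register name)
+ *	mmiowb();
+ *	spin_unlock(&dev_lock);
+ */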
+
+static inline void iosync(void)
+{
+        __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ * Those are fairly weak though. They don't provide a barrier between
+ * MMIO and cacheable storage nor do they provide a barrier vs. locks,
+ * they only provide barriers between 2 __raw MMIO operations and
+ * possibly break write combining.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r()  eieio()
+#define iobarrier_w()  eieio()
+
+
+/*
+ * output pause versions need a delay at least for the
+ * w83c105 ide controller in a p610.
+ */
+#define inb_p(port)             inb(port)
+#define outb_p(val, port)       (udelay(1), outb((val), (port)))
+#define inw_p(port)             inw(port)
+#define outw_p(val, port)       (udelay(1), outw((val), (port)))
+#define inl_p(port)             inl(port)
+#define outl_p(val, port)       (udelay(1), outl((val), (port)))
+
+
+#define IO_SPACE_LIMIT ~(0UL)
+
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @address:   bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * We provide a few variations of it:
+ *
+ * * ioremap is the standard one and provides non-cacheable guarded mappings
+ *   and can be hooked by the platform via ppc_md
+ *
+ * * ioremap_prot allows to specify the page flags as an argument and can
+ *   also be hooked by the platform via ppc_md.
+ *
+ * * ioremap_nocache is identical to ioremap
+ *
+ * * ioremap_wc enables write combining
+ *
+ * * iounmap undoes such a mapping and can be hooked
+ *
+ * * __ioremap_at (and the pending __iounmap_at) are low level functions to
+ *   create hand-made mappings for use only by the PCI code and cannot
+ *   currently be hooked. Must be page aligned.
+ *
+ * * __ioremap is the low level implementation used by ioremap and
+ *   ioremap_prot and cannot be hooked (but can be used by a hook on one
+ *   of the previous ones)
+ *
+ * * __ioremap_caller is the same as above but takes an explicit caller
+ *   reference rather than using __builtin_return_address(0)
+ *
+ * * __iounmap, is the low level implementation used by iounmap and cannot
+ *   be hooked (but can be used by a hook on iounmap)
+ *
+ */
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+				  unsigned long flags);
+extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
+#define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_uc(addr, size)		ioremap((addr), (size))
+#define ioremap_cache(addr, size) \
+	ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
+			       unsigned long flags);
+extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
+				      unsigned long flags, void *caller);
+
+extern void __iounmap(volatile void __iomem *addr);
+
+extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
+				   unsigned long size, unsigned long flags);
+extern void __iounmap_at(void *ea, unsigned long size);
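+
+/*
+ * Illustrative usage sketch (hypothetical driver code, with a made-up
+ * register offset):
+ *
+ *	void __iomem *regs = ioremap(res->start, resource_size(res));
+ *
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(0, regs + MY_RESET_REG);
+ *	...
+ *	iounmap(regs);
+ */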
+
+/*
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
+ * which needs some additional definitions here. They basically allow PIO
+ * space overall to be 1GB. This will work as long as we never try to use
+ * iomap to map MMIO below 1GB which should be fine on ppc64
+ */
+#define HAVE_ARCH_PIO_SIZE		1
+#define PIO_OFFSET			0x00000000UL
+#define PIO_MASK			(FULL_IO_SIZE - 1)
+#define PIO_RESERVED			(FULL_IO_SIZE)
+
+#define mmio_read16be(addr)		readw_be(addr)
+#define mmio_read32be(addr)		readl_be(addr)
+#define mmio_write16be(val, addr)	writew_be(val, addr)
+#define mmio_write32be(val, addr)	writel_be(val, addr)
+#define mmio_insb(addr, dst, count)	readsb(addr, dst, count)
+#define mmio_insw(addr, dst, count)	readsw(addr, dst, count)
+#define mmio_insl(addr, dst, count)	readsl(addr, dst, count)
+#define mmio_outsb(addr, src, count)	writesb(addr, src, count)
+#define mmio_outsw(addr, src, count)	writesw(addr, src, count)
+#define mmio_outsl(addr, src, count)	writesl(addr, src, count)
+
+/**
+ *	virt_to_phys	-	map virtual addresses to physical
+ *	@address: address to remap
+ *
+ *	The returned physical address is the physical (CPU) mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses directly mapped or allocated via kmalloc.
+ *
+ *	This function does not give bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+	return __pa((unsigned long)address);
+}
+
+/**
+ *	phys_to_virt	-	map physical address to virtual
+ *	@address: address to remap
+ *
+ *	The returned virtual address is a current CPU mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses that have a kernel mapping
+ *
+ *	This function does not handle bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function
+ */
+static inline void * phys_to_virt(unsigned long address)
+{
+	return (void *)__va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * 32 bits still uses virt_to_bus() for its implementation of DMA
+ * mappings so we have to keep it defined here. We also have some old
+ * drivers (shame shame shame) that use bus_to_virt() and haven't been
+ * fixed yet so I need to define it here.
+ */
+#ifdef CONFIG_PPC32
+
+static inline unsigned long virt_to_bus(volatile void * address)
+{
+        if (address == NULL)
+		return 0;
+        return __pa(address) + PCI_DRAM_OFFSET;
+}
+
+static inline void * bus_to_virt(unsigned long address)
+{
+        if (address == 0)
+		return NULL;
+        return __va(address - PCI_DRAM_OFFSET);
+}
+
+#define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
+
+#endif /* CONFIG_PPC32 */
+
+/* access ports */
+#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
+#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))
+
+#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) |  (_v))
+#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
+
+#define setbits8(_addr, _v) out_8((_addr), in_8(_addr) |  (_v))
+#define clrbits8(_addr, _v) out_8((_addr), in_8(_addr) & ~(_v))
+
+/* Clear and set bits in one shot.  These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write.  These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits(type, addr, clear, set) \
+	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#ifdef __powerpc64__
+#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)
+#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)
+#endif
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
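+
+/*
+ * Illustrative example (hypothetical register layout): replace a bit
+ * field while leaving the other bits untouched:
+ *
+ *	clrsetbits_be32(&regs->mode, MODE_MASK, MODE_16BIT);
+ *
+ * which expands to a single read-modify-write:
+ *
+ *	out_be32(&regs->mode, (in_be32(&regs->mode) & ~MODE_MASK) | MODE_16BIT);
+ */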
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h
new file mode 100644
index 0000000..b1a9a1b
--- /dev/null
+++ b/arch/powerpc/include/asm/io_event_irq.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H
+#define _ASM_POWERPC_IO_EVENT_IRQ_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+
+#define PSERIES_IOEI_RPC_MAX_LEN 216
+
+#define PSERIES_IOEI_TYPE_ERR_DETECTED		0x01
+#define PSERIES_IOEI_TYPE_ERR_RECOVERED		0x02
+#define PSERIES_IOEI_TYPE_EVENT			0x03
+#define PSERIES_IOEI_TYPE_RPC_PASS_THRU		0x04
+
+#define PSERIES_IOEI_SUBTYPE_NOT_APP		0x00
+#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ	0x01
+#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE	0x03
+#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE	0x04
+#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE	0x05
+#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE	0x06
+#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED	0x07
+
+#define PSERIES_IOEI_SCOPE_NOT_APP		0x00
+#define PSERIES_IOEI_SCOPE_RIO_HUB		0x36
+#define PSERIES_IOEI_SCOPE_RIO_BRIDGE		0x37
+#define PSERIES_IOEI_SCOPE_PHB			0x38
+#define PSERIES_IOEI_SCOPE_EADS_GLOBAL		0x39
+#define PSERIES_IOEI_SCOPE_EADS_SLOT		0x3A
+#define PSERIES_IOEI_SCOPE_TORRENT_HUB		0x3B
+#define PSERIES_IOEI_SCOPE_SERVICE_PROC		0x51
+
+/* Platform Event Log Format, Version 6, data portion of IO event section */
+struct pseries_io_event {
+	uint8_t event_type;		/* 0x00 IO-Event Type		*/
+	uint8_t rpc_data_len;		/* 0x01 RPC data length		*/
+	uint8_t scope;			/* 0x02 Error/Event Scope	*/
+	uint8_t event_subtype;		/* 0x03 I/O-Event Sub-Type	*/
+	uint32_t drc_index;		/* 0x04 DRC Index		*/
+	uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN];
+					/* 0x08 RPC Data (0-216 bytes,	*/
+					/* padded to 4-byte alignment)	*/
+};
+
+extern struct atomic_notifier_head pseries_ioei_notifier_list;
+
+#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
new file mode 100644
index 0000000..3d4b88c
--- /dev/null
+++ b/arch/powerpc/include/asm/iommu.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_IOMMU_H
+#define _ASM_IOMMU_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/machdep.h>
+#include <asm/types.h>
+#include <asm/pci-bridge.h>
+#include <asm/asm-const.h>
+
+#define IOMMU_PAGE_SHIFT_4K      12
+#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
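+
+/* Editor's note: for a table with it_page_shift == 12, IOMMU_PAGE_SIZE()
+ * is 0x1000 and IOMMU_PAGE_MASK() is 0xfffff000, so for example
+ * IOMMU_PAGE_ALIGN(0x1234, tbl) rounds up to 0x2000.
+ */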
+
+/* Boot time flags */
+extern int iommu_is_off;
+extern int iommu_force_on;
+
+struct iommu_table_ops {
+	/*
+	 * When called with direction==DMA_NONE, this behaves like clear().
+	 * uaddr is a linear map address.
+	 */
+	int (*set)(struct iommu_table *tbl,
+			long index, long npages,
+			unsigned long uaddr,
+			enum dma_data_direction direction,
+			unsigned long attrs);
+#ifdef CONFIG_IOMMU_API
+	/*
+	 * Exchanges existing TCE with new TCE plus direction bits;
+	 * returns old TCE and DMA direction mask.
+	 * @tce is a physical address.
+	 */
+	int (*exchange)(struct iommu_table *tbl,
+			long index,
+			unsigned long *hpa,
+			enum dma_data_direction *direction);
+	/* Real mode */
+	int (*exchange_rm)(struct iommu_table *tbl,
+			long index,
+			unsigned long *hpa,
+			enum dma_data_direction *direction);
+
+	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
+#endif
+	void (*clear)(struct iommu_table *tbl,
+			long index, long npages);
+	/* get() returns a physical address */
+	unsigned long (*get)(struct iommu_table *tbl, long index);
+	void (*flush)(struct iommu_table *tbl);
+	void (*free)(struct iommu_table *tbl);
+};
+
+/* These are used by VIO */
+extern struct iommu_table_ops iommu_table_lpar_multi_ops;
+extern struct iommu_table_ops iommu_table_pseries_ops;
+
+/*
+ * IOMAP_MAX_ORDER defines the largest contiguous block
+ * of DMA space we can get.  IOMAP_MAX_ORDER = 13
+ * allows up to 2**12 pages of 4096 bytes each (4096 * 4096 = 16 MB).
+ */
+#define IOMAP_MAX_ORDER		13
+
+#define IOMMU_POOL_HASHBITS	2
+#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+	unsigned long start;
+	unsigned long end;
+	unsigned long hint;
+	spinlock_t lock;
+} ____cacheline_aligned_in_smp;
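+
+/* Editor's note: the table's allocation bitmap is split into up to
+ * IOMMU_NR_POOLS (4) pools, each with its own lock and allocation hint,
+ * to reduce lock contention when many CPUs map DMA buffers concurrently;
+ * large allocations are served from a separate large_pool (see struct
+ * iommu_table below).
+ */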
+
+struct iommu_table {
+	unsigned long  it_busno;     /* Bus number this table belongs to */
+	unsigned long  it_size;      /* Size of iommu table in entries */
+	unsigned long  it_indirect_levels;
+	unsigned long  it_level_size;
+	unsigned long  it_allocated_size;
+	unsigned long  it_offset;    /* Offset into global table */
+	unsigned long  it_base;      /* mapped address of tce table */
+	unsigned long  it_index;     /* which iommu table this is */
+	unsigned long  it_type;      /* type: PCI or Virtual Bus */
+	unsigned long  it_blocksize; /* Entries in each block (cacheline) */
+	unsigned long  poolsize;
+	unsigned long  nr_pools;
+	struct iommu_pool large_pool;
+	struct iommu_pool pools[IOMMU_NR_POOLS];
+	unsigned long *it_map;       /* A simple allocation bitmap for now */
+	unsigned long  it_page_shift;/* table iommu page size */
+	struct list_head it_group_list;/* List of iommu_table_group_link */
+	__be64 *it_userspace; /* userspace view of the table */
+	struct iommu_table_ops *it_ops;
+	struct kref    it_kref;
+	int it_nid;
+};
+
+#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
+#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
+		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
+
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
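+/* Editor's note: e.g. with it_page_shift == 12, a request of size 0x5000
+ * (five 4K IOMMU pages) yields __ilog2(0x4fff >> 12) + 1 = __ilog2(4) + 1
+ * = 3, i.e. an order-3 block covering up to 8 pages.
+ */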
+
+
+struct scatterlist;
+
+#ifdef CONFIG_PPC64
+
+#define IOMMU_MAPPING_ERROR		(~(dma_addr_t)0x0)
+
+static inline void set_iommu_table_base(struct device *dev,
+					struct iommu_table *base)
+{
+	dev->archdata.iommu_table_base = base;
+}
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return dev->archdata.iommu_table_base;
+}
+
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
+extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
+extern int iommu_tce_table_put(struct iommu_table *tbl);
+
+/* Initializes an iommu_table based on values set in the passed-in
+ * structure.
+ */
+extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
+					    int nid);
+#define IOMMU_TABLE_GROUP_MAX_TABLES	2
+
+struct iommu_table_group;
+
+struct iommu_table_group_ops {
+	unsigned long (*get_table_size)(
+			__u32 page_shift,
+			__u64 window_size,
+			__u32 levels);
+	long (*create_table)(struct iommu_table_group *table_group,
+			int num,
+			__u32 page_shift,
+			__u64 window_size,
+			__u32 levels,
+			struct iommu_table **ptbl);
+	long (*set_window)(struct iommu_table_group *table_group,
+			int num,
+			struct iommu_table *tblnew);
+	long (*unset_window)(struct iommu_table_group *table_group,
+			int num);
+	/* Switch ownership from platform code to external user (e.g. VFIO) */
+	void (*take_ownership)(struct iommu_table_group *table_group);
+	/* Switch ownership from external user (e.g. VFIO) back to core */
+	void (*release_ownership)(struct iommu_table_group *table_group);
+};
+
+struct iommu_table_group_link {
+	struct list_head next;
+	struct rcu_head rcu;
+	struct iommu_table_group *table_group;
+};
+
+struct iommu_table_group {
+	/* IOMMU properties */
+	__u32 tce32_start;
+	__u32 tce32_size;
+	__u64 pgsizes; /* Bitmap of supported page sizes */
+	__u32 max_dynamic_windows_supported;
+	__u32 max_levels;
+
+	struct iommu_group *group;
+	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
+	struct iommu_table_group_ops *ops;
+};
+
+#ifdef CONFIG_IOMMU_API
+
+extern void iommu_register_group(struct iommu_table_group *table_group,
+				 int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct device *dev);
+extern void iommu_del_device(struct device *dev);
+extern int __init tce_iommu_bus_notifier_init(void);
+extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
+		unsigned long *hpa, enum dma_data_direction *direction);
+#else
+static inline void iommu_register_group(struct iommu_table_group *table_group,
+					int pci_domain_number,
+					unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct device *dev)
+{
+	return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+
+static inline int __init tce_iommu_bus_notifier_init(void)
+{
+	return 0;
+}
+#endif /* !CONFIG_IOMMU_API */
+
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return NULL;
+}
+
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+			    struct scatterlist *sglist, int nelems,
+			    unsigned long mask,
+			    enum dma_data_direction direction,
+			    unsigned long attrs);
+extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
+			       struct scatterlist *sglist,
+			       int nelems,
+			       enum dma_data_direction direction,
+			       unsigned long attrs);
+
+extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+				  size_t size, dma_addr_t *dma_handle,
+				  unsigned long mask, gfp_t flag, int node);
+extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+				 struct page *page, unsigned long offset,
+				 size_t size, unsigned long mask,
+				 enum dma_data_direction direction,
+				 unsigned long attrs);
+extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+			     size_t size, enum dma_data_direction direction,
+			     unsigned long attrs);
+
+extern void iommu_init_early_pSeries(void);
+extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
+extern void iommu_init_early_pasemi(void);
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
+static inline void iommu_save(void)
+{
+	if (ppc_md.iommu_save)
+		ppc_md.iommu_save();
+}
+
+static inline void iommu_restore(void)
+{
+	if (ppc_md.iommu_restore)
+		ppc_md.iommu_restore();
+}
+#endif
+
+/* The API to support IOMMU operations for VFIO */
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+		unsigned long offset, unsigned long size,
+		unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_gpa(unsigned long page_shift,
+		unsigned long gpa);
+
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
+		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), 1) ||                     \
+		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
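+
+/* Editor's note: both checks above evaluate non-zero when the parameters
+ * are invalid; the clear check additionally rejects any non-zero
+ * tce_value, since clearing must write an empty TCE.
+ */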
+
+extern void iommu_flush_tce(struct iommu_table *tbl);
+extern int iommu_take_ownership(struct iommu_table *tbl);
+extern void iommu_release_ownership(struct iommu_table *tbl);
+
+extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
+extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_IOMMU_H */
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
new file mode 100644
index 0000000..fb59829
--- /dev/null
+++ b/arch/powerpc/include/asm/ipic.h
@@ -0,0 +1,84 @@
+/*
+ * IPIC external definitions and structure.
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2005 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_IPIC_H__
+#define __ASM_IPIC_H__
+
+#include <linux/irq.h>
+
+/* Flags when we init the IPIC */
+#define IPIC_SPREADMODE_GRP_A	0x00000001
+#define IPIC_SPREADMODE_GRP_B	0x00000002
+#define IPIC_SPREADMODE_GRP_C	0x00000004
+#define IPIC_SPREADMODE_GRP_D	0x00000008
+#define IPIC_SPREADMODE_MIX_A	0x00000010
+#define IPIC_SPREADMODE_MIX_B	0x00000020
+#define IPIC_DISABLE_MCP_OUT	0x00000040
+#define IPIC_IRQ0_MCP		0x00000080
+
+/* IPIC registers offsets */
+#define IPIC_SICFR	0x00	/* System Global Interrupt Configuration Register */
+#define IPIC_SIVCR	0x04	/* System Global Interrupt Vector Register */
+#define IPIC_SIPNR_H	0x08	/* System Internal Interrupt Pending Register (HIGH) */
+#define IPIC_SIPNR_L	0x0C	/* System Internal Interrupt Pending Register (LOW) */
+#define IPIC_SIPRR_A	0x10	/* System Internal Interrupt group A Priority Register */
+#define IPIC_SIPRR_B	0x14	/* System Internal Interrupt group B Priority Register */
+#define IPIC_SIPRR_C	0x18	/* System Internal Interrupt group C Priority Register */
+#define IPIC_SIPRR_D	0x1C	/* System Internal Interrupt group D Priority Register */
+#define IPIC_SIMSR_H	0x20	/* System Internal Interrupt Mask Register (HIGH) */
+#define IPIC_SIMSR_L	0x24	/* System Internal Interrupt Mask Register (LOW) */
+#define IPIC_SICNR	0x28	/* System Internal Interrupt Control Register */
+#define IPIC_SEPNR	0x2C	/* System External Interrupt Pending Register */
+#define IPIC_SMPRR_A	0x30	/* System Mixed Interrupt group A Priority Register */
+#define IPIC_SMPRR_B	0x34	/* System Mixed Interrupt group B Priority Register */
+#define IPIC_SEMSR	0x38	/* System External Interrupt Mask Register */
+#define IPIC_SECNR	0x3C	/* System External Interrupt Control Register */
+#define IPIC_SERSR	0x40	/* System Error Status Register */
+#define IPIC_SERMR	0x44	/* System Error Mask Register */
+#define IPIC_SERCR	0x48	/* System Error Control Register */
+#define IPIC_SIFCR_H	0x50	/* System Internal Interrupt Force Register (HIGH) */
+#define IPIC_SIFCR_L	0x54	/* System Internal Interrupt Force Register (LOW) */
+#define IPIC_SEFCR	0x58	/* System External Interrupt Force Register */
+#define IPIC_SERFR	0x5C	/* System Error Force Register */
+#define IPIC_SCVCR	0x60	/* System Critical Interrupt Vector Register */
+#define IPIC_SMVCR	0x64	/* System Management Interrupt Vector Register */
+
+enum ipic_prio_grp {
+	IPIC_INT_GRP_A = IPIC_SIPRR_A,
+	IPIC_INT_GRP_D = IPIC_SIPRR_D,
+	IPIC_MIX_GRP_A = IPIC_SMPRR_A,
+	IPIC_MIX_GRP_B = IPIC_SMPRR_B,
+};
+
+enum ipic_mcp_irq {
+	IPIC_MCP_IRQ0 = 0,
+	IPIC_MCP_WDT  = 1,
+	IPIC_MCP_SBA  = 2,
+	IPIC_MCP_PCI1 = 5,
+	IPIC_MCP_PCI2 = 6,
+	IPIC_MCP_MU   = 7,
+};
+
+extern int ipic_set_priority(unsigned int irq, unsigned int priority);
+extern void ipic_set_highest_priority(unsigned int irq);
+extern void ipic_set_default_priority(void);
+extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq);
+extern void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq);
+extern u32 ipic_get_mcp_status(void);
+extern void ipic_clear_mcp_status(u32 mask);
+
+extern struct ipic *ipic_init(struct device_node *node, unsigned int flags);
+extern unsigned int ipic_get_irq(void);
+
+#endif /* __ASM_IPIC_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
new file mode 100644
index 0000000..ee39ce5
--- /dev/null
+++ b/arch/powerpc/include/asm/irq.h
@@ -0,0 +1,75 @@
+#ifdef __KERNEL__
+#ifndef _ASM_POWERPC_IRQ_H
+#define _ASM_POWERPC_IRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+
+#include <asm/types.h>
+#include <linux/atomic.h>
+
+
+extern atomic_t ppc_n_lost_interrupts;
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ			(0)
+
+/* Total number of virq in the platform */
+#define NR_IRQS		CONFIG_NR_IRQS
+
+/* Same thing, used by the generic IRQ code */
+#define NR_IRQS_LEGACY		NUM_ISA_INTERRUPTS
+
+extern irq_hw_number_t virq_to_hw(unsigned int virq);
+
+static __inline__ int irq_canonicalize(int irq)
+{
+	return irq;
+}
+
+extern int distribute_irqs;
+
+struct irqaction;
+struct pt_regs;
+
+#define __ARCH_HAS_DO_SOFTIRQ
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+/*
+ * Per-cpu stacks for handling critical, debug and machine check
+ * level interrupts.
+ */
+extern struct thread_info *critirq_ctx[NR_CPUS];
+extern struct thread_info *dbgirq_ctx[NR_CPUS];
+extern struct thread_info *mcheckirq_ctx[NR_CPUS];
+extern void exc_lvl_ctx_init(void);
+#else
+#define exc_lvl_ctx_init()
+#endif
+
+/*
+ * Per-cpu stacks for handling hard and soft interrupts.
+ */
+extern struct thread_info *hardirq_ctx[NR_CPUS];
+extern struct thread_info *softirq_ctx[NR_CPUS];
+
+extern void irq_ctx_init(void);
+extern void call_do_softirq(struct thread_info *tp);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
+extern void do_IRQ(struct pt_regs *regs);
+extern void __init init_IRQ(void);
+extern void __do_irq(struct pt_regs *regs);
+
+int irq_choose_cpu(const struct cpumask *mask);
+
+#endif /* _ASM_POWERPC_IRQ_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 0000000..b8b0be8
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return true;
+}
+extern void arch_irq_work_raise(void);
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
new file mode 100644
index 0000000..1a6c1ce
--- /dev/null
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IRQ flags handling
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Get definitions for arch_local_save_flags(x), etc.
+ */
+#include <asm/hw_irq.h>
+
+#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	mflr	r0;				\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
+	std	r0, 16(r1);			\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
+	bl func;				\
+	ld	r1, 0(r1);			\
+	ld	r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	bl func;
+#endif
+
+/*
+ * These are calls to C code, so the caller must be prepared for volatiles to
+ * be clobbered.
+ */
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
+
+/*
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
+ *
+ * NB: This may call C code, so the caller must be prepared for volatiles to
+ * be clobbered.
+ */
+#define RECONCILE_IRQ_STATE(__rA, __rB)		\
+	lbz	__rA,PACAIRQSOFTMASK(r13);	\
+	lbz	__rB,PACAIRQHAPPENED(r13);	\
+	andi.	__rA,__rA,IRQS_DISABLED;	\
+	li	__rA,IRQS_DISABLED;		\
+	ori	__rB,__rB,PACA_IRQ_HARD_DIS;	\
+	stb	__rB,PACAIRQHAPPENED(r13);	\
+	bne	44f;				\
+	stb	__rA,PACAIRQSOFTMASK(r13);	\
+	TRACE_DISABLE_INTS;			\
+44:
+
+#else
+#define TRACE_ENABLE_INTS
+#define TRACE_DISABLE_INTS
+
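+/* Editor's note: non-tracing variant of RECONCILE_IRQ_STATE below; same
+ * effect -- mark interrupts soft-disabled and record that they were
+ * hard-disabled -- minus the trace_hardirqs_off() call.
+ */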
+#define RECONCILE_IRQ_STATE(__rA, __rB)		\
+	lbz	__rA,PACAIRQHAPPENED(r13);	\
+	li	__rB,IRQS_DISABLED;		\
+	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	\
+	stb	__rB,PACAIRQSOFTMASK(r13);	\
+	stb	__rA,PACAIRQHAPPENED(r13)
+#endif
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/isa-bridge.h b/arch/powerpc/include/asm/isa-bridge.h
new file mode 100644
index 0000000..4729589
--- /dev/null
+++ b/arch/powerpc/include/asm/isa-bridge.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ISA_BRIDGE_H
+#define __ISA_BRIDGE_H
+
+#ifdef CONFIG_PPC64
+
+extern void isa_bridge_find_early(struct pci_controller *hose);
+extern void isa_bridge_init_non_pci(struct device_node *np);
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+	/* Check if address hits the reserved legacy IO range */
+	unsigned long ea = (unsigned long)address;
+	return ea >= ISA_IO_BASE && ea < ISA_IO_END;
+}
+
+#else
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+	/* No specific ISA handling on ppc32 at this stage; it
+	 * all goes through PCI.
+	 */
+	return 0;
+}
+
+#endif
+
+#endif /* __ISA_BRIDGE_H */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 0000000..a3b2cf9
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_POWERPC_JUMP_LABEL_H
+#define _ASM_POWERPC_JUMP_LABEL_H
+
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
+
+#define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
+#define JUMP_LABEL_NOP_SIZE	4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "nop # arch_static_branch\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes] # arch_static_branch_jump\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
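+
+/* Illustrative (editor's sketch; my_feature is hypothetical): a user of
+ * static keys writes, e.g.,
+ *
+ *	static DEFINE_STATIC_KEY_FALSE(my_feature);
+ *
+ *	if (static_branch_unlikely(&my_feature))
+ *		do_feature();
+ *
+ * and the nop/branch emitted above is patched at runtime by
+ * static_branch_enable()/static_branch_disable(), which use the
+ * __jump_table entries to locate each site.
+ */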
+
+#ifdef CONFIG_PPC64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#else
+#define ARCH_STATIC_BRANCH(LABEL, KEY)		\
+1098:	nop;					\
+	.pushsection __jump_table, "aw";	\
+	FTR_ENTRY_LONG 1098b, LABEL, KEY;	\
+	.popsection
+#endif
+
+#endif /* _ASM_POWERPC_JUMP_LABEL_H */
diff --git a/arch/powerpc/include/asm/kdebug.h b/arch/powerpc/include/asm/kdebug.h
new file mode 100644
index 0000000..0f7c1ef
--- /dev/null
+++ b/arch/powerpc/include/asm/kdebug.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
+#ifdef __KERNEL__
+
+/* Grossly misnamed. */
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_IABR_MATCH,
+	DIE_DABR_MATCH,
+	DIE_BPT,
+	DIE_SSTEP,
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
new file mode 100644
index 0000000..fd128d1
--- /dev/null
+++ b/arch/powerpc/include/asm/kdump.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PPC64_KDUMP_H
+#define _PPC64_KDUMP_H
+
+#include <asm/page.h>
+
+#define KDUMP_KERNELBASE	0x2000000
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater than or equal to the trampoline's end address.
+ * Reserve to the end of the FWNMI area, see head_64.S */
+#define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */
+
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * On PPC64, translation is disabled during trampoline setup, so we use
+ * physical addresses. On PPC32, however, translation is already enabled,
+ * so we can't do the same. Luckily, create_trampoline() creates relative
+ * branches, so we can just add PAGE_OFFSET and not worry about it.
+ */
+#ifdef __powerpc64__
+#define KDUMP_TRAMPOLINE_START	0x0100
+#define KDUMP_TRAMPOLINE_END	0x3000
+#else
+#define KDUMP_TRAMPOLINE_START	(0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END	(0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
+
+#define KDUMP_MIN_TCE_ENTRIES	2048
+
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
+extern void reserve_kdump_trampoline(void);
+extern void setup_kdump_trampoline(void);
+#else
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
+static inline void reserve_kdump_trampoline(void) { ; }
+static inline void setup_kdump_trampoline(void) { ; }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PPC64_KDUMP_H */
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
new file mode 100644
index 0000000..4a585cb
--- /dev/null
+++ b/arch/powerpc/include/asm/kexec.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+#ifdef __KERNEL__
+
+#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x)
+
+/*
+ * On FSL-BookE we set up a 1:1 mapping which covers the first 2GiB of
+ * memory, and therefore we can only deal with memory within this range.
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
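+
+/* Editor's note: 2 * 1024 * 1024 * 1024UL - 1 == 0x7fffffff, i.e. the
+ * last byte of the first 2GiB. */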
+
+#else
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+#endif
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
+#ifndef __ASSEMBLY__
+#include <asm/reg.h>
+
+typedef void (*crash_shutdown_t)(void);
+
+#ifdef CONFIG_KEXEC_CORE
+
+/*
+ * This function is responsible for capturing register state when we come
+ * in via panic or when a dump is invoked using sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+					struct pt_regs *oldregs)
+{
+	if (oldregs)
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	else
+		ppc_save_regs(newregs);
+}
+
+extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
+					  master to copy new code to 0 */
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern void crash_ipi_callback(struct pt_regs *);
+extern int crash_wake_offline;
+
+struct kimage;
+struct pt_regs;
+extern void default_machine_kexec(struct kimage *image);
+extern int default_machine_kexec_prepare(struct kimage *image);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+extern int crash_shutdown_register(crash_shutdown_t handler);
+extern int crash_shutdown_unregister(crash_shutdown_t handler);
+
+extern void crash_kexec_secondary(struct pt_regs *regs);
+extern int overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+extern void machine_kexec_mask_interrupts(void);
+
+static inline bool kdump_in_progress(void)
+{
+	return crashing_cpu >= 0;
+}
+
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops kexec_elf64_ops;
+
+#ifdef CONFIG_IMA_KEXEC
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+	phys_addr_t ima_buffer_addr;
+	size_t ima_buffer_size;
+};
+#endif
+
+int setup_purgatory(struct kimage *image, const void *slave_code,
+		    const void *fdt, unsigned long kernel_load_addr,
+		    unsigned long fdt_load_addr);
+int setup_new_fdt(const struct kimage *image, void *fdt,
+		  unsigned long initrd_load_addr, unsigned long initrd_len,
+		  const char *cmdline);
+int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
+#endif /* CONFIG_KEXEC_FILE */
+
+#else /* !CONFIG_KEXEC_CORE */
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
+
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+	return 0;
+}
+
+static inline void reserve_crashkernel(void) { ; }
+
+static inline int crash_shutdown_register(crash_shutdown_t handler)
+{
+	return 0;
+}
+
+static inline int crash_shutdown_unregister(crash_shutdown_t handler)
+{
+	return 0;
+}
+
+static inline bool kdump_in_progress(void)
+{
+	return false;
+}
+
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
new file mode 100644
index 0000000..debdf54
--- /dev/null
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KEYLARGO_H
+#define _ASM_POWERPC_KEYLARGO_H
+#ifdef __KERNEL__
+/*
+ * keylargo.h: definitions for using the "KeyLargo" I/O controller chip.
+ */
+
+/* The "Pangea" chipset has KeyLargo device-id 0x25 while Core99
+ * has device-id 0x22. The revision of the Pangea one is 0, so we
+ * fake an artificial revision in keylargo_rev by ORing in 0x100.
+ */
+#define KL_PANGEA_REV		0x100
+
+/* offset from base for feature control registers */
+#define KEYLARGO_MBCR		0x34	/* KL Only, Media bay control/status */
+#define KEYLARGO_FCR0		0x38
+#define KEYLARGO_FCR1		0x3c
+#define KEYLARGO_FCR2		0x40
+#define KEYLARGO_FCR3		0x44
+#define KEYLARGO_FCR4		0x48
+#define KEYLARGO_FCR5		0x4c	/* Pangea only */
+
+/* K2 additional FCRs */
+#define K2_FCR6			0x34
+#define K2_FCR7			0x30
+#define K2_FCR8			0x2c
+#define K2_FCR9			0x28
+#define K2_FCR10		0x24
+
+/* GPIO registers */
+#define KEYLARGO_GPIO_LEVELS0		0x50
+#define KEYLARGO_GPIO_LEVELS1		0x54
+#define KEYLARGO_GPIO_EXTINT_0		0x58
+#define KEYLARGO_GPIO_EXTINT_CNT	18
+#define KEYLARGO_GPIO_0			0x6A
+#define KEYLARGO_GPIO_CNT		17
+#define KEYLARGO_GPIO_EXTINT_DUAL_EDGE	0x80
+#define KEYLARGO_GPIO_OUTPUT_ENABLE	0x04
+#define KEYLARGO_GPIO_OUTOUT_DATA	0x01
+#define KEYLARGO_GPIO_INPUT_DATA	0x02
+
+/* K2 has only extint GPIOs, and has 51 of them */
+#define K2_GPIO_EXTINT_0		0x58
+#define K2_GPIO_EXTINT_CNT		51
+
+/* Specific GPIO regs */
+
+#define KL_GPIO_MODEM_RESET		(KEYLARGO_GPIO_0+0x03)
+#define KL_GPIO_MODEM_POWER		(KEYLARGO_GPIO_0+0x02) /* Pangea */
+
+#define KL_GPIO_SOUND_POWER		(KEYLARGO_GPIO_0+0x05)
+
+/* Hrm... this one is only to be used on Pismo. It seems to also
+ * control the timebase enable on other machines. Still to be
+ * experimented... --BenH.
+ */
+#define KL_GPIO_FW_CABLE_POWER		(KEYLARGO_GPIO_0+0x09)
+#define KL_GPIO_TB_ENABLE		(KEYLARGO_GPIO_0+0x09)
+
+#define KL_GPIO_ETH_PHY_RESET		(KEYLARGO_GPIO_0+0x10)
+
+#define KL_GPIO_EXTINT_CPU1		(KEYLARGO_GPIO_0+0x0a)
+#define KL_GPIO_EXTINT_CPU1_ASSERT	0x04
+#define KL_GPIO_EXTINT_CPU1_RELEASE	0x38
+
+#define KL_GPIO_RESET_CPU0		(KEYLARGO_GPIO_EXTINT_0+0x03)
+#define KL_GPIO_RESET_CPU1		(KEYLARGO_GPIO_EXTINT_0+0x04)
+#define KL_GPIO_RESET_CPU2		(KEYLARGO_GPIO_EXTINT_0+0x0f)
+#define KL_GPIO_RESET_CPU3		(KEYLARGO_GPIO_EXTINT_0+0x10)
+
+#define KL_GPIO_PMU_MESSAGE_IRQ		(KEYLARGO_GPIO_EXTINT_0+0x09)
+#define KL_GPIO_PMU_MESSAGE_BIT		KEYLARGO_GPIO_INPUT_DATA
+
+#define KL_GPIO_MEDIABAY_IRQ		(KEYLARGO_GPIO_EXTINT_0+0x0e)
+
+#define KL_GPIO_AIRPORT_0		(KEYLARGO_GPIO_EXTINT_0+0x0a)
+#define KL_GPIO_AIRPORT_1		(KEYLARGO_GPIO_EXTINT_0+0x0d)
+#define KL_GPIO_AIRPORT_2		(KEYLARGO_GPIO_0+0x0d)
+#define KL_GPIO_AIRPORT_3		(KEYLARGO_GPIO_0+0x0e)
+#define KL_GPIO_AIRPORT_4		(KEYLARGO_GPIO_0+0x0f)
+
+/*
+ * Bits in feature control register. Those bits different for K2 are
+ * listed separately
+ */
+#define KL_MBCR_MB0_PCI_ENABLE		0x00000800	/* exist ? */
+#define KL_MBCR_MB0_IDE_ENABLE		0x00001000
+#define KL_MBCR_MB0_FLOPPY_ENABLE	0x00002000	/* exist ? */
+#define KL_MBCR_MB0_SOUND_ENABLE	0x00004000	/* hrm... */
+#define KL_MBCR_MB0_DEV_MASK		0x00007800
+#define KL_MBCR_MB0_DEV_POWER		0x00000400
+#define KL_MBCR_MB0_DEV_RESET		0x00000200
+#define KL_MBCR_MB0_ENABLE		0x00000100
+#define KL_MBCR_MB1_PCI_ENABLE		0x08000000	/* exist ? */
+#define KL_MBCR_MB1_IDE_ENABLE		0x10000000
+#define KL_MBCR_MB1_FLOPPY_ENABLE	0x20000000	/* exist ? */
+#define KL_MBCR_MB1_SOUND_ENABLE	0x40000000	/* hrm... */
+#define KL_MBCR_MB1_DEV_MASK		0x78000000
+#define KL_MBCR_MB1_DEV_POWER		0x04000000
+#define KL_MBCR_MB1_DEV_RESET		0x02000000
+#define KL_MBCR_MB1_ENABLE		0x01000000
+
+#define KL0_SCC_B_INTF_ENABLE		0x00000001	/* (KL Only) */
+#define KL0_SCC_A_INTF_ENABLE		0x00000002
+#define KL0_SCC_SLOWPCLK		0x00000004
+#define KL0_SCC_RESET			0x00000008
+#define KL0_SCCA_ENABLE			0x00000010
+#define KL0_SCCB_ENABLE			0x00000020
+#define KL0_SCC_CELL_ENABLE		0x00000040
+#define KL0_IRDA_HIGH_BAND		0x00000100	/* (KL Only) */
+#define KL0_IRDA_SOURCE2_SEL		0x00000200	/* (KL Only) */
+#define KL0_IRDA_SOURCE1_SEL		0x00000400	/* (KL Only) */
+#define KL0_PG_USB0_PMI_ENABLE		0x00000400	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_RESET			0x00000800	/* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND_SEL	0x00000800	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT1		0x00001000	/* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND		0x00001000	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT0		0x00002000	/* (KL Only) */
+#define KL0_PG_USB0_PAD_SUSPEND		0x00002000	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_FAST_CONNECT		0x00004000	/* (KL Only) */
+#define KL0_PG_USB1_PMI_ENABLE		0x00004000	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_ENABLE			0x00008000	/* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND_SEL	0x00008000	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK32_ENABLE		0x00010000	/* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND		0x00010000	/* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK19_ENABLE		0x00020000	/* (KL Only) */
+#define KL0_PG_USB1_PAD_SUSPEND		0x00020000	/* (Pangea/Intrepid Only) */
+#define KL0_USB0_PAD_SUSPEND0		0x00040000
+#define KL0_USB0_PAD_SUSPEND1		0x00080000
+#define KL0_USB0_CELL_ENABLE		0x00100000
+#define KL0_USB1_PAD_SUSPEND0		0x00400000
+#define KL0_USB1_PAD_SUSPEND1		0x00800000
+#define KL0_USB1_CELL_ENABLE		0x01000000
+#define KL0_USB_REF_SUSPEND		0x10000000	/* (KL Only) */
+
+#define KL0_SERIAL_ENABLE		(KL0_SCC_B_INTF_ENABLE | \
+					KL0_SCC_SLOWPCLK | \
+					KL0_SCC_CELL_ENABLE | KL0_SCCA_ENABLE)
+
+#define KL1_USB2_PMI_ENABLE		0x00000001	/* Intrepid only */
+#define KL1_AUDIO_SEL_22MCLK		0x00000002	/* KL/Pangea only */
+#define KL1_USB2_REF_SUSPEND_SEL	0x00000002	/* Intrepid only */
+#define KL1_USB2_REF_SUSPEND		0x00000004	/* Intrepid only */
+#define KL1_AUDIO_CLK_ENABLE_BIT	0x00000008	/* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND_SEL	0x00000008	/* Intrepid only */
+#define KL1_USB2_PAD_SUSPEND0		0x00000010	/* Intrepid only */
+#define KL1_AUDIO_CLK_OUT_ENABLE	0x00000020	/* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND1		0x00000020	/* Intrepid only */
+#define KL1_AUDIO_CELL_ENABLE		0x00000040	/* KL/Pangea only */
+#define KL1_USB2_CELL_ENABLE		0x00000040	/* Intrepid only */
+#define KL1_AUDIO_CHOOSE		0x00000080	/* KL/Pangea only */
+#define KL1_I2S0_CHOOSE			0x00000200	/* KL Only */
+#define KL1_I2S0_CELL_ENABLE		0x00000400
+#define KL1_I2S0_CLK_ENABLE_BIT		0x00001000
+#define KL1_I2S0_ENABLE			0x00002000
+#define KL1_I2S1_CELL_ENABLE		0x00020000
+#define KL1_I2S1_CLK_ENABLE_BIT		0x00080000
+#define KL1_I2S1_ENABLE			0x00100000
+#define KL1_EIDE0_ENABLE		0x00800000	/* KL/Intrepid Only */
+#define KL1_EIDE0_RESET_N		0x01000000	/* KL/Intrepid Only */
+#define KL1_EIDE1_ENABLE		0x04000000	/* KL Only */
+#define KL1_EIDE1_RESET_N		0x08000000	/* KL Only */
+#define KL1_UIDE_ENABLE			0x20000000	/* KL/Pangea Only */
+#define KL1_UIDE_RESET_N		0x40000000	/* KL/Pangea Only */
+
+#define KL2_IOBUS_ENABLE		0x00000002
+#define KL2_SLEEP_STATE_BIT		0x00000100	/* KL Only */
+#define KL2_PG_STOP_ALL_CLOCKS		0x00000100	/* Pangea Only */
+#define KL2_MPIC_ENABLE			0x00020000
+#define KL2_CARDSLOT_RESET		0x00040000	/* Pangea/Intrepid Only */
+#define KL2_ALT_DATA_OUT		0x02000000	/* KL Only ??? */
+#define KL2_MEM_IS_BIG			0x04000000
+#define KL2_CARDSEL_16			0x08000000
+
+#define KL3_SHUTDOWN_PLL_TOTAL		0x00000001	/* KL/Pangea only */
+#define KL3_SHUTDOWN_PLLKW6		0x00000002	/* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL3		0x00000002	/* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW4		0x00000004	/* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL2		0x00000004	/* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW35		0x00000008	/* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL1		0x00000008	/* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW12		0x00000010	/* KL Only */
+#define KL3_IT_ENABLE_PLL3_SHUTDOWN	0x00000010	/* Intrepid only */
+#define KL3_PLL_RESET			0x00000020	/* KL/Pangea only */
+#define KL3_IT_ENABLE_PLL2_SHUTDOWN	0x00000020	/* Intrepid only */
+#define KL3_IT_ENABLE_PLL1_SHUTDOWN	0x00000010	/* Intrepid only */
+#define KL3_SHUTDOWN_PLL2X		0x00000080	/* KL Only */
+#define KL3_CLK66_ENABLE		0x00000100	/* KL Only */
+#define KL3_CLK49_ENABLE		0x00000200
+#define KL3_CLK45_ENABLE		0x00000400
+#define KL3_CLK31_ENABLE		0x00000800	/* KL/Pangea only */
+#define KL3_TIMER_CLK18_ENABLE		0x00001000
+#define KL3_I2S1_CLK18_ENABLE		0x00002000
+#define KL3_I2S0_CLK18_ENABLE		0x00004000
+#define KL3_VIA_CLK16_ENABLE		0x00008000	/* KL/Pangea only */
+#define KL3_IT_VIA_CLK32_ENABLE		0x00008000	/* Intrepid only */
+#define KL3_STOPPING33_ENABLED		0x00080000	/* KL Only */
+#define KL3_PG_PLL_ENABLE_TEST		0x00080000	/* Pangea Only */
+
+/* Intrepid USB bus 2, port 0,1 */
+#define KL3_IT_PORT_WAKEUP_ENABLE(p)		(0x00080000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_WAKE_EN(p)		(0x00040000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_WAKE_EN(p)		(0x00020000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_WAKE_EN(p)	(0x00010000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_STAT(p)		(0x00300000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_STAT(p)		(0x00200000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_STAT(p)		(0x00100000 << ((p)<<3))
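+
+/* Editor's note: each port's bits are packed 8 apart, hence the (p)<<3
+ * shift; e.g. KL3_IT_PORT_WAKEUP_ENABLE(0) == 0x00080000 and
+ * KL3_IT_PORT_WAKEUP_ENABLE(1) == 0x08000000.
+ */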
+
+/* Port 0,1 : bus 0, port 2,3 : bus 1 */
+#define KL4_PORT_WAKEUP_ENABLE(p)	(0x00000008 << ((p)<<3))
+#define KL4_PORT_RESUME_WAKE_EN(p)	(0x00000004 << ((p)<<3))
+#define KL4_PORT_CONNECT_WAKE_EN(p)	(0x00000002 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_WAKE_EN(p)	(0x00000001 << ((p)<<3))
+#define KL4_PORT_RESUME_STAT(p)		(0x00000040 << ((p)<<3))
+#define KL4_PORT_CONNECT_STAT(p)	(0x00000020 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_STAT(p)	(0x00000010 << ((p)<<3))
+
+/* Pangea and Intrepid only */
+#define KL5_VIA_USE_CLK31		0x00000001	/* Pangea Only */
+#define KL5_SCC_USE_CLK31		0x00000002	/* Pangea Only */
+#define KL5_PWM_CLK32_EN		0x00000004
+#define KL5_CLK3_68_EN			0x00000010
+#define KL5_CLK32_EN			0x00000020
+
+
+/* K2 definitions */
+#define K2_FCR0_USB0_SWRESET		0x00200000
+#define K2_FCR0_USB1_SWRESET		0x02000000
+#define K2_FCR0_RING_PME_DISABLE	0x08000000
+
+#define K2_FCR1_PCI1_BUS_RESET_N	0x00000010
+#define K2_FCR1_PCI1_SLEEP_RESET_EN	0x00000020
+#define K2_FCR1_I2S0_CELL_ENABLE	0x00000400
+#define K2_FCR1_I2S0_RESET		0x00000800
+#define K2_FCR1_I2S0_CLK_ENABLE_BIT	0x00001000
+#define K2_FCR1_I2S0_ENABLE		0x00002000
+#define K2_FCR1_PCI1_CLK_ENABLE		0x00004000
+#define K2_FCR1_FW_CLK_ENABLE		0x00008000
+#define K2_FCR1_FW_RESET_N		0x00010000
+#define K2_FCR1_I2S1_CELL_ENABLE	0x00020000
+#define K2_FCR1_I2S1_CLK_ENABLE_BIT	0x00080000
+#define K2_FCR1_I2S1_ENABLE		0x00100000
+#define K2_FCR1_GMAC_CLK_ENABLE		0x00400000
+#define K2_FCR1_GMAC_POWER_DOWN		0x00800000
+#define K2_FCR1_GMAC_RESET_N		0x01000000
+#define K2_FCR1_SATA_CLK_ENABLE		0x02000000
+#define K2_FCR1_SATA_POWER_DOWN		0x04000000
+#define K2_FCR1_SATA_RESET_N		0x08000000
+#define K2_FCR1_UATA_CLK_ENABLE		0x10000000
+#define K2_FCR1_UATA_RESET_N		0x40000000
+#define K2_FCR1_UATA_CHOOSE_CLK66	0x80000000
+
+/* Shasta definitions */
+#define SH_FCR1_I2S2_CELL_ENABLE	0x00000010
+#define SH_FCR1_I2S2_CLK_ENABLE_BIT	0x00000040
+#define SH_FCR1_I2S2_ENABLE		0x00000080
+#define SH_FCR3_I2S2_CLK18_ENABLE	0x00008000
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEYLARGO_H */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
new file mode 100644
index 0000000..9db24e7
--- /dev/null
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -0,0 +1,64 @@
+/*
+ * The PowerPC (32/64) specific defines / externs for KGDB.  Based on
+ * the previous 32bit and 64bit specific files, which had the following
+ * copyrights:
+ *
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
+ * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
+ * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
+ *
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Author: Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifdef __KERNEL__
+#ifndef __POWERPC_KGDB_H__
+#define __POWERPC_KGDB_H__
+
+#ifndef __ASSEMBLY__
+
+#define BREAK_INSTR_SIZE	4
+#define BUFMAX			((NUMREGBYTES * 2) + 512)
+#define OUTBUFMAX		((NUMREGBYTES * 2) + 512)
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm(".long 0x7d821008"); /* twge r2, r2 */
+}
+#define CACHE_FLUSH_IS_SAFE	1
+#define DBG_MAX_REG_NUM     70
+
+/* The number of bytes of registers we have to save depends on a few
+ * things.  For 64-bit we default to not including the vector registers
+ * and vector state registers. */
+#ifdef CONFIG_PPC64
+/*
+ * 64 bit (8 byte) registers:
+ *   32 gpr, 32 fpr, nip, msr, link, ctr
+ * 32 bit (4 byte) registers:
+ *   ccr, xer, fpscr
+ */
+#define NUMREGBYTES		((68 * 8) + (3 * 4))
+#define NUMCRITREGBYTES		184
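+/* Editor's note: (68 * 8) + (3 * 4) = 544 + 12 = 556 bytes, matching the
+ * register list above: 32 GPRs + 32 FPRs + nip/msr/link/ctr at 8 bytes
+ * each, plus ccr/xer/fpscr at 4 bytes each.
+ */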
+#else /* CONFIG_PPC32 */
+/* On non-E500 PPC32 we determine the size by picking the last
+ * register we need; on E500 we skip sections, so we list what we
+ * need to store and add it up. */
+#ifndef CONFIG_E500
+#define MAXREG			(PT_FPSCR+1)
+#else
+/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
+#define MAXREG                 ((32*2)+6+2+1)
+#endif
+#define NUMREGBYTES		(MAXREG * sizeof(int))
+/* CR/LR, R1, R2, R13-R31 inclusive. */
+#define NUMCRITREGBYTES		(23 * sizeof(int))
+#endif /* 32/64 */
+#endif /* !(__ASSEMBLY__) */
+#endif /* !__POWERPC_KGDB_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
new file mode 100644
index 0000000..5acabbd
--- /dev/null
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_KMAP_TYPES_H
+#define _ASM_POWERPC_KMAP_TYPES_H
+
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define KM_TYPE_NR 16
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
new file mode 100644
index 0000000..785c464
--- /dev/null
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -0,0 +1,110 @@
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef __KERNEL__
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <asm/probes.h>
+#include <asm/code-patching.h>
+
+#ifdef CONFIG_KPROBES
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct pt_regs;
+struct kprobe;
+
+typedef ppc_opcode_t kprobe_opcode_t;
+
+extern kprobe_opcode_t optinsn_slot;
+
+/* Optinsn template address */
+extern kprobe_opcode_t optprobe_template_entry[];
+extern kprobe_opcode_t optprobe_template_op_address[];
+extern kprobe_opcode_t optprobe_template_call_handler[];
+extern kprobe_opcode_t optprobe_template_insn[];
+extern kprobe_opcode_t optprobe_template_call_emulate[];
+extern kprobe_opcode_t optprobe_template_ret[];
+extern kprobe_opcode_t optprobe_template_end[];
+
+/* Fixed instruction size for powerpc */
+#define MAX_INSN_SIZE		1
+#define MAX_OPTIMIZED_LENGTH	sizeof(kprobe_opcode_t)	/* 4 bytes */
+#define MAX_OPTINSN_SIZE	(optprobe_template_end - optprobe_template_entry)
+#define RELATIVEJUMP_SIZE	sizeof(kprobe_opcode_t)	/* 4 bytes */
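+
+/* Editor's note: the sizes above are in units of kprobe_opcode_t (one
+ * fixed 4-byte powerpc instruction), so MAX_INSN_SIZE of 1 means one
+ * instruction, not one byte.
+ */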
+
+#define flush_insn_slot(p)	do { } while (0)
+#define kretprobe_blacklist_size 0
+
+void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	/*
+	 * Set in kprobes code, initially to 0. If the instruction can be
+	 * emulated, this is set to 1; if not, to -1.
+	 */
+	int boostable;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_msr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_msr;
+	struct prev_kprobe prev_kprobe;
+};
+
+struct arch_optimized_insn {
+	kprobe_opcode_t copied_insn[1];
+	/* detour buffer */
+	kprobe_opcode_t *insn;
+};
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+					unsigned long val, void *data);
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_handler(struct pt_regs *regs);
+extern int kprobe_post_handler(struct pt_regs *regs);
+#else
+static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
+static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_KPROBES */
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_KPROBES_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
new file mode 100644
index 0000000..a790d5c
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
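+
+/* Editor's note: VCPU_SIZE_LOG = 4 + 12 = 16, so VCPU_SIZE_BYTES is
+ * 1 << 16 = 64KiB, matching the IVPR alignment stated above. */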
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+
+/* E500 */
+#ifdef CONFIG_SPE_POSSIBLE
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
+#endif
+
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
+
+/* book3s */
+
+#define BOOK3S_INTERRUPT_SYSTEM_RESET	0x100
+#define BOOK3S_INTERRUPT_MACHINE_CHECK	0x200
+#define BOOK3S_INTERRUPT_DATA_STORAGE	0x300
+#define BOOK3S_INTERRUPT_DATA_SEGMENT	0x380
+#define BOOK3S_INTERRUPT_INST_STORAGE	0x400
+#define BOOK3S_INTERRUPT_INST_SEGMENT	0x480
+#define BOOK3S_INTERRUPT_EXTERNAL	0x500
+#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL	0x501
+#define BOOK3S_INTERRUPT_EXTERNAL_HV	0x502
+#define BOOK3S_INTERRUPT_ALIGNMENT	0x600
+#define BOOK3S_INTERRUPT_PROGRAM	0x700
+#define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800
+#define BOOK3S_INTERRUPT_DECREMENTER	0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980
+#define BOOK3S_INTERRUPT_DOORBELL	0xa00
+#define BOOK3S_INTERRUPT_SYSCALL	0xc00
+#define BOOK3S_INTERRUPT_TRACE		0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
+#define BOOK3S_INTERRUPT_HMI		0xe60
+#define BOOK3S_INTERRUPT_H_DOORBELL	0xe80
+#define BOOK3S_INTERRUPT_H_VIRT		0xea0
+#define BOOK3S_INTERRUPT_PERFMON	0xf00
+#define BOOK3S_INTERRUPT_ALTIVEC	0xf20
+#define BOOK3S_INTERRUPT_VSX		0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL	0xf60
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80
+
+/* book3s_hv */
+
+#define BOOK3S_INTERRUPT_HV_SOFTPATCH	0x1500
+
+/*
+ * Special trap used to indicate to host that this is a
+ * passthrough interrupt that could not be handled
+ * completely in the guest.
+ */
+#define BOOK3S_INTERRUPT_HV_RM_HARD	0x5555
+
+#define BOOK3S_IRQPRIO_SYSTEM_RESET		0
+#define BOOK3S_IRQPRIO_DATA_SEGMENT		1
+#define BOOK3S_IRQPRIO_INST_SEGMENT		2
+#define BOOK3S_IRQPRIO_DATA_STORAGE		3
+#define BOOK3S_IRQPRIO_INST_STORAGE		4
+#define BOOK3S_IRQPRIO_ALIGNMENT		5
+#define BOOK3S_IRQPRIO_PROGRAM			6
+#define BOOK3S_IRQPRIO_FP_UNAVAIL		7
+#define BOOK3S_IRQPRIO_ALTIVEC			8
+#define BOOK3S_IRQPRIO_VSX			9
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL		10
+#define BOOK3S_IRQPRIO_SYSCALL			11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK		12
+#define BOOK3S_IRQPRIO_DEBUG			13
+#define BOOK3S_IRQPRIO_EXTERNAL			14
+#define BOOK3S_IRQPRIO_DECREMENTER		15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	16
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		17
+#define BOOK3S_IRQPRIO_MAX			18
+
+#define BOOK3S_HFLAG_DCBZ32			0x1
+#define BOOK3S_HFLAG_SLB			0x2
+#define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
+#define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
+#define BOOK3S_HFLAG_SPLIT_HACK			0x40
+
+#define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+#define RESUME_FLAG_ARCH1	(1<<2)
+#define RESUME_FLAG_ARCH2	(1<<3)
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_NV         RESUME_FLAG_NV
+#define RESUME_HOST             RESUME_FLAG_HOST
+#define RESUME_HOST_NV          (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#define KVM_GUEST_MODE_NONE	0
+#define KVM_GUEST_MODE_GUEST	1
+#define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
+
+#define KVM_INST_FETCH_FAILED	-1
+
+#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
new file mode 100644
index 0000000..83a9aa3
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -0,0 +1,440 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_H__
+#define __ASM_KVM_BOOK3S_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_book3s_asm.h>
+
+struct kvmppc_bat {
+	u64 raw;
+	u32 bepi;
+	u32 bepi_mask;
+	u32 brpn;
+	u8 wimg;
+	u8 pp;
+	bool vs		: 1;
+	bool vp		: 1;
+};
+
+struct kvmppc_sid_map {
+	u64 guest_vsid;
+	u64 guest_esid;
+	u64 host_vsid;
+	bool valid	: 1;
+};
+
+#define SID_MAP_BITS    9
+#define SID_MAP_NUM     (1 << SID_MAP_BITS)
+#define SID_MAP_MASK    (SID_MAP_NUM - 1)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SID_CONTEXTS	1
+#else
+#define SID_CONTEXTS	128
+#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
+#endif
+
+struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_pte_long;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
+	struct rcu_head rcu_head;
+	u64 host_vpn;
+	u64 pfn;
+	ulong slot;
+	struct kvmppc_pte pte;
+	int pagesize;
+};
+
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits.  This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+	int n_runnable;
+	int num_threads;
+	int entry_exit_map;
+	int napping_threads;
+	int first_vcpuid;
+	u16 pcpu;
+	u16 last_cpu;
+	u8 vcore_state;
+	u8 in_guest;
+	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
+	struct list_head preempt_list;
+	spinlock_t lock;
+	struct swait_queue_head wq;
+	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
+	u64 stolen_tb;
+	u64 preempt_tb;
+	struct kvm_vcpu *runner;
+	struct kvm *kvm;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	u64 tb_offset_applied;	/* timebase offset currently in force */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
+	ulong dpdes;		/* doorbell state (POWER8) */
+	ulong vtb;		/* virtual timebase */
+	ulong conferring_threads;
+	unsigned int halt_poll_ns;
+	atomic_t online_count;
+};
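+
+/* Editor's note on entry_exit_map above: e.g. with threads 0 and 1
+ * entered and none exited the map reads 0x003; once thread 0 exits it
+ * becomes 0x103 (exit bits start at bit 8).
+ */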
+
+struct kvmppc_vcpu_book3s {
+	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
+	struct {
+		u64 esid;
+		u64 vsid;
+	} slb_shadow[64];
+	u8 slb_shadow_max;
+	struct kvmppc_bat ibat[8];
+	struct kvmppc_bat dbat[8];
+	u64 hid[6];
+	u64 gqr[8];
+	u64 sdr1;
+	u64 hior;
+	u64 msr_mask;
+	u64 vtb;
+#ifdef CONFIG_PPC_BOOK3S_32
+	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
+#else
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
+#endif
+	int context_id[SID_CONTEXTS];
+
+	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */
+
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
+	int hpte_cache_count;
+	spinlock_t mmu_lock;
+};
+
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
+#define VSID_1T		0x1000000000000000ULL
+#define VSID_REAL_DR	0x2000000000000000ULL
+#define VSID_REAL_IR	0x4000000000000000ULL
+#define VSID_PR		0x8000000000000000ULL
+
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
+extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
+extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
+extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
+			struct kvm_vcpu *vcpu, unsigned long addr,
+			unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+			unsigned long slb_v, unsigned long valid);
+extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			unsigned long gpa, gva_t ea, int is_store);
+
+extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
+extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern int kvmppc_mmu_hpte_sysinit(void);
+extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
+
+extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
+			struct kvm_vcpu *vcpu,
+			unsigned long ea, unsigned long dsisr);
+extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+			struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern int kvmppc_init_vm_radix(struct kvm *kvm);
+extern void kvmppc_free_radix(struct kvm *kvm);
+extern int kvmppc_radix_init(void);
+extern void kvmppc_radix_exit(void);
+extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			unsigned long gfn);
+extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			unsigned long gfn);
+extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			unsigned long gfn);
+extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
+			struct kvm_memory_slot *memslot, unsigned long *map);
+extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+
+/* XXX remove this export when load_last_inst() is generic */
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
+extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+					  unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
+extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
+			   bool upper, u32 val);
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
+			bool writing, bool *writable);
+extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+			unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
+			unsigned long gfn, unsigned long psize);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
+			unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
+			unsigned long pte_index);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+			unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
+			unsigned long gpa, bool dirty);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+			long pte_index, unsigned long pteh, unsigned long ptel,
+			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+			unsigned long pte_index, unsigned long avpn,
+			unsigned long *hpret);
+extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
+			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
+			struct kvm_memory_slot *memslot,
+			unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
+extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
+
+extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
+extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
+extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_entry_trampoline(void);
+extern void kvmppc_hv_entry_trampoline(void);
+extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
+extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+extern int kvm_irq_bypass;
+
+static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.book3s;
+}
+
+/* Also add subarch-specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.regs.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.regs.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.xer = val;
+}
+
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.link = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.link;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.nip = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.nip;
+}
+
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dar;
+}
+
+static inline bool is_kvmppc_resume_guest(int r)
+{
+	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
+}
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+	/* Only PR KVM supports the magic page */
+	return !is_kvmppc_hv_enabled(vcpu->kvm);
+}
+
+extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
+
+/* Magic register values loaded into r3 and r4 before the 'sc' assembly
+ * instruction for the OSI hypercalls */
+#define OSI_SC_MAGIC_R3			0x113724FA
+#define OSI_SC_MAGIC_R4			0x77810F9B
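+
+/*
+ * A hypothetical guest-side sequence: load the magic values into r3/r4
+ * and execute 'sc' so the host can tell an OSI hypercall apart from a
+ * normal system call:
+ *
+ *	lis	r3, OSI_SC_MAGIC_R3@h
+ *	ori	r3, r3, OSI_SC_MAGIC_R3@l
+ *	lis	r4, OSI_SC_MAGIC_R4@h
+ *	ori	r4, r4, OSI_SC_MAGIC_R4@l
+ *	sc
+ */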
+
+#define INS_DCBZ			0x7c0007ec
+/* TO = 31 for unconditional trap */
+#define INS_TW				0x7fe00008
+
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)
+
+#define SPLIT_HACK_MASK			0xff000000
+#define SPLIT_HACK_OFFS			0xfb000000
+
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+	int stride = kvm->arch.emul_smt_mode;
+	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+	u32 packed_id;
+
+	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+		return 0;
+	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+		return 0;
+	return packed_id;
+}
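+
+/*
+ * Worked example (values chosen for illustration): with
+ * kvm->arch.emul_smt_mode == 4 and KVM_MAX_VCPUS == N, id == N + 5
+ * falls in block 1, which indexes block_offsets[1 * (8 / 4)] == 2,
+ * i.e. stride / 2, so the packed ID is 5 + 2 == 7 -- the second half
+ * of the vcore.
+ */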
+
+#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
new file mode 100644
index 0000000..c720e0b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -0,0 +1,47 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_32_H__
+#define __ASM_KVM_BOOK3S_32_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.shadow_vcpu;
+}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}
+
+#define PTE_SIZE	12
+#define VSID_ALL	0
+#define SR_INVALID	0x00000001	/* VSID 1 should always be unused */
+#define SR_KP		0x20000000
+#define PTE_V		0x80000000
+#define PTE_SEC		0x00000040
+#define PTE_M		0x00000010
+#define PTE_R		0x00000100
+#define PTE_C		0x00000080
+
+#define SID_SHIFT	28
+#define ESID_MASK	0xf0000000
+#define VSID_MASK	0x00fffffff0000000ULL
+#define VPN_SHIFT	12
+
+#endif /* __ASM_KVM_BOOK3S_32_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
new file mode 100644
index 0000000..dc435a5
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -0,0 +1,520 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_64_H__
+#define __ASM_KVM_BOOK3S_64_H__
+
+#include <linux/string.h>
+#include <asm/bitops.h>
+#include <asm/book3s/64/mmu-hash.h>
+
+/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
+#define PPC_MIN_HPT_ORDER	18
+#define PPC_MAX_HPT_ORDER	46
+
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	return &get_paca()->shadow_vcpu;
+}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	preempt_enable();
+}
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+static inline bool kvm_is_radix(struct kvm *kvm)
+{
+	return kvm->arch.radix;
+}
+
+#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
+#endif
+
+/*
+ * We use a lock bit in HPTE dword 0 to synchronize updates and
+ * accesses to each HPTE, and another bit to indicate non-present
+ * HPTEs.
+ */
+#define HPTE_V_HVLOCK	0x40UL
+#define HPTE_V_ABSENT	0x20UL
+
+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED	(1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
+
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
+{
+	unsigned long tmp, old;
+	__be64 be_lockbit, be_bits;
+
+	/*
+	 * We load/store in native endian, but the HTAB is in big endian.
+	 * If we byte-swap all data we apply to the PTE, we are
+	 * implicitly correct again.
+	 */
+	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+	be_bits = cpu_to_be64(bits);
+
+	asm volatile("	ldarx	%0,0,%2\n"
+		     "	and.	%1,%0,%3\n"
+		     "	bne	2f\n"
+		     "	or	%0,%0,%4\n"
+		     "  stdcx.	%0,0,%2\n"
+		     "	beq+	2f\n"
+		     "	mr	%1,%3\n"
+		     "2:	isync"
+		     : "=&r" (tmp), "=&r" (old)
+		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
+		     : "cc", "memory");
+	return old == 0;
+}
+
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	hpte[0] = cpu_to_be64(hpte_v);
+}
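+
+/*
+ * A minimal sketch of the intended lock/unlock pattern around an HPTE
+ * update (hptep is assumed to point into the guest HPT):
+ *
+ *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ *		cpu_relax();
+ *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ *	... examine or modify the HPTE ...
+ *	unlock_hpte(hptep, v);
+ */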
+
+/*
+ * These functions encode knowledge of the POWER7/8/9 hardware
+ * interpretations of the HPTE LP (large page size) field.
+ */
+static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
+{
+	unsigned int lphi;
+
+	if (!(h & HPTE_V_LARGE))
+		return 12;	/* 4kB */
+	lphi = (l >> 16) & 0xf;
+	switch ((l >> 12) & 0xf) {
+	case 0:
+		return !lphi ? 24 : 0;		/* 16MB */
+	case 1:
+		return 16;			/* 64kB */
+	case 3:
+		return !lphi ? 34 : 0;		/* 16GB */
+	case 7:
+		return (16 << 8) + 12;		/* 64kB in 4kB */
+	case 8:
+		if (!lphi)
+			return (24 << 8) + 16;	/* 16MB in 64kB */
+		if (lphi == 3)
+			return (24 << 8) + 12;	/* 16MB in 4kB */
+		break;
+	}
+	return 0;
+}
+
+static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
+{
+	return kvmppc_hpte_page_shifts(h, l) & 0xff;
+}
+
+static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
+{
+	int tmp = kvmppc_hpte_page_shifts(h, l);
+
+	if (tmp >= 0x100)
+		tmp >>= 8;
+	return tmp;
+}
+
+static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
+{
+	int shift = kvmppc_hpte_actual_page_shift(v, r);
+
+	if (shift)
+		return 1ul << shift;
+	return 0;
+}
+
+static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
+{
+	switch (base_shift) {
+	case 12:
+		switch (actual_shift) {
+		case 12:
+			return 0;
+		case 16:
+			return 7;
+		case 24:
+			return 0x38;
+		}
+		break;
+	case 16:
+		switch (actual_shift) {
+		case 16:
+			return 1;
+		case 24:
+			return 8;
+		}
+		break;
+	case 24:
+		return 0;
+	}
+	return -1;
+}
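+
+/*
+ * Round-trip example: kvmppc_pgsize_lp_encoding(12, 24) yields 0x38,
+ * i.e. a 16MB page backed by 4kB base pages.  With that LP value in
+ * bits 12..19 of r and HPTE_V_LARGE set in v,
+ * kvmppc_hpte_page_shifts() returns (24 << 8) + 12: base page shift
+ * 12, actual page shift 24.
+ */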
+
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+					     unsigned long pte_index)
+{
+	int a_pgshift, b_pgshift;
+	unsigned long rb = 0, va_low, sllp;
+
+	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
+	if (a_pgshift >= 0x100) {
+		b_pgshift &= 0xff;
+		a_pgshift >>= 8;
+	}
+
+	/*
+	 * Ignore the top 14 bits of the va.
+	 * v has its top two bits covering segment size, hence shift
+	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
+	 * The AVA field in v also has its lower 23 bits ignored.
+	 * For a 4K base page size we need bits 14..65 (so we must
+	 * collect an extra 11 bits); for others we need bits 14..14+i.
+	 */
+	/* This covers bits 14..54 of the va */
+	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+
+	/*
+	 * The AVA in v has its lower 23 bits cleared; we need to
+	 * derive those from the pteg index.
+	 */
+	va_low = pte_index >> 3;
+	if (v & HPTE_V_SECONDARY)
+		va_low = ~va_low;
+	/*
+	 * Get the vpn bits from va_low by reversing the hashing.
+	 * In v we have the va with its lower 23 bits dropped and then
+	 * shifted left by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid
+	 * we need to shift it right by (SID_SHIFT - (23 - 7)).
+	 */
+	if (!(v & HPTE_V_1TB_SEG))
+		va_low ^= v >> (SID_SHIFT - 16);
+	else
+		va_low ^= v >> (SID_SHIFT_1T - 16);
+	va_low &= 0x7ff;
+
+	if (b_pgshift <= 12) {
+		if (a_pgshift > 12) {
+			sllp = (a_pgshift == 16) ? 5 : 4;
+			rb |= sllp << 5;	/*  AP field */
+		}
+		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
+	} else {
+		int aval_shift;
+		/*
+		 * Remaining bits of the AVA/LP fields; these also
+		 * contain the rr bits of LP.
+		 */
+		rb |= (va_low << b_pgshift) & 0x7ff000;
+		/*
+		 * Now clear the LP bits that are not needed for the
+		 * actual page size.
+		 */
+		rb &= ~((1ul << a_pgshift) - 1);
+		/*
+		 * AVAL field: bits 58..(77 - base_page_shift) of the va.
+		 * We have space for bits 58..64; missing bits should be
+		 * zero-filled.  The +1 accounts for the L bit shift.
+		 */
+		aval_shift = 64 - (77 - b_pgshift) + 1;
+		rb |= ((va_low << aval_shift) & 0xfe);
+
+		rb |= 1;		/* L field */
+		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
+	}
+	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
+	return rb;
+}
+
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
+static inline int hpte_is_writable(unsigned long ptel)
+{
+	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
+
+	return pp != PP_RXRX && pp != PP_RXXX;
+}
+
+static inline unsigned long hpte_make_readonly(unsigned long ptel)
+{
+	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
+		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
+	else
+		ptel |= PP_RXRX;
+	return ptel;
+}
+
+static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
+{
+	unsigned int wimg = hptel & HPTE_R_WIMG;
+
+	/* Handle SAO */
+	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+	    cpu_has_feature(CPU_FTR_ARCH_206))
+		wimg = HPTE_R_M;
+
+	if (!is_ci)
+		return wimg == HPTE_R_M;
+	/*
+	 * If the host mapping is cache-inhibited, make sure the guest
+	 * hptel is cache-inhibited too.
+	 */
+	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
+		return false;
+	return !!(wimg & HPTE_R_I);
+}
+
+/*
+ * If the PTE is present, atomically set the referenced bit (and, when
+ * writing to a writable page, the dirty bit) and return the PTE;
+ * otherwise return 0.
+ */
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
+{
+	pte_t old_pte, new_pte = __pte(0);
+
+	while (1) {
+		/*
+		 * Make sure we don't reload from ptep
+		 */
+		old_pte = READ_ONCE(*ptep);
+		/*
+		 * Wait until H_PAGE_BUSY is clear, then update the
+		 * PTE atomically.
+		 */
+		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
+			cpu_relax();
+			continue;
+		}
+		/* If the pte is not present, return an empty pte */
+		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
+			return __pte(0);
+
+		new_pte = pte_mkyoung(old_pte);
+		if (writing && pte_write(old_pte))
+			new_pte = pte_mkdirty(new_pte);
+
+		if (pte_xchg(ptep, old_pte, new_pte))
+			break;
+	}
+	return new_pte;
+}
+
+static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
+{
+	if (key)
+		return PP_RWRX <= pp && pp <= PP_RXRX;
+	return true;
+}
+
+static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
+{
+	if (key)
+		return pp == PP_RWRW;
+	return pp <= PP_RWRW;
+}
+
+static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
+{
+	unsigned long skey;
+
+	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
+		((hpte_r & HPTE_R_KEY_LO) >> 9);
+	return (amr >> (62 - 2 * skey)) & 3;
+}
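+
+/*
+ * For example, skey 0 is controlled by the two most-significant AMR
+ * bits, (amr >> 62) & 3, while skey 31 is controlled by the two
+ * least-significant bits.
+ */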
+
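+/*
+ * Classic test-then-test-and-set: spin on a plain read until the lock
+ * bit looks clear, then attempt the atomic acquire.
+ */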
+static inline void lock_rmap(unsigned long *rmap)
+{
+	do {
+		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+			cpu_relax();
+	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
+
+static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
+				   unsigned long pagesize)
+{
+	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
+
+	if (pagesize <= PAGE_SIZE)
+		return true;
+	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
+}
+
+/*
+ * This works for 4k, 64k and 16M pages on POWER7,
+ * and 4k and 16M pages on PPC970.
+ */
+static inline unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+	unsigned long senc = 0;
+
+	if (psize > 0x1000) {
+		senc = SLB_VSID_L;
+		if (psize == 0x10000)
+			senc |= SLB_VSID_LP_01;
+	}
+	return senc;
+}
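+
+/*
+ * For example, slb_pgsize_encoding(0x1000) == 0 (4k pages),
+ * slb_pgsize_encoding(0x10000) == SLB_VSID_L | SLB_VSID_LP_01 (64k),
+ * and slb_pgsize_encoding(0x1000000) == SLB_VSID_L (16M).
+ */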
+
+static inline int is_vrma_hpte(unsigned long hpte_v)
+{
+	return (hpte_v & ~0xffffffUL) ==
+		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+					  struct revmap_entry *rev)
+{
+	if (atomic_read(&kvm->arch.hpte_mod_interest))
+		rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
+/*
+ * Like kvm_memslots(), but for use in real mode when we can't do
+ * any RCU stuff (since the secondary threads are offline from the
+ * kernel's point of view), and we can't print anything.
+ * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
+ */
+static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
+{
+	return rcu_dereference_raw_notrace(kvm->memslots[0]);
+}
+
+extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+
+extern void kvmhv_rm_send_ipi(int cpu);
+
+static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
+{
+	/* HPTEs are 2**4 bytes long */
+	return 1UL << (hpt->order - 4);
+}
+
+static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
+{
+	/* 128 (2**7) bytes in each HPTEG */
+	return (1UL << (hpt->order - 7)) - 1;
+}
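+
+/*
+ * For example, at the default HPT order of 24 (a 16MB HPT) there are
+ * 1UL << (24 - 4) == 2^20 HPTEs and 2^17 HPTEGs, so kvmppc_hpt_mask()
+ * returns (1UL << 17) - 1.
+ */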
+
+/* Set bits in a dirty bitmap, which is in LE format */
+static inline void set_dirty_bits(unsigned long *map, unsigned long i,
+				  unsigned long npages)
+{
+	if (npages >= 8)
+		memset((char *)map + i / 8, 0xff, npages / 8);
+	else
+		for (; npages; ++i, --npages)
+			__set_bit_le(i, map);
+}
+
+static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
+					 unsigned long npages)
+{
+	if (npages >= 8)
+		memset((char *)map + i / 8, 0xff, npages / 8);
+	else
+		for (; npages; ++i, --npages)
+			set_bit_le(i, map);
+}
+
+static inline u64 sanitize_msr(u64 msr)
+{
+	msr &= ~MSR_HV;
+	msr |= MSR_ME;
+	return msr;
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.cr  = vcpu->arch.cr_tm;
+	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+	vcpu->arch.regs.link  = vcpu->arch.lr_tm;
+	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
+	vcpu->arch.amr = vcpu->arch.amr_tm;
+	vcpu->arch.ppr = vcpu->arch.ppr_tm;
+	vcpu->arch.dscr = vcpu->arch.dscr_tm;
+	vcpu->arch.tar = vcpu->arch.tar_tm;
+	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+	       sizeof(vcpu->arch.regs.gpr));
+	vcpu->arch.fp  = vcpu->arch.fp_tm;
+	vcpu->arch.vr  = vcpu->arch.vr_tm;
+	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
+}
+
+static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.cr_tm  = vcpu->arch.cr;
+	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+	vcpu->arch.lr_tm  = vcpu->arch.regs.link;
+	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
+	vcpu->arch.amr_tm = vcpu->arch.amr;
+	vcpu->arch.ppr_tm = vcpu->arch.ppr;
+	vcpu->arch.dscr_tm = vcpu->arch.dscr;
+	vcpu->arch.tar_tm = vcpu->arch.tar;
+	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+	       sizeof(vcpu->arch.regs.gpr));
+	vcpu->arch.fp_tm  = vcpu->arch.fp;
+	vcpu->arch.vr_tm  = vcpu->arch.vr;
+	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
new file mode 100644
index 0000000..d978fdf
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -0,0 +1,182 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_ASM_H__
+#define __ASM_KVM_BOOK3S_ASM_H__
+
+/* XICS ICP register offsets */
+#define XICS_XIRR		4
+#define XICS_MFRR		0xc
+#define XICS_IPI		2	/* interrupt source # for IPIs */
+
+/* Maximum number of threads per physical core */
+#define MAX_SMT_THREADS		8
+
+/* Maximum number of subcores per physical core */
+#define MAX_SUBCORES		4
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+
+#include <asm/kvm_asm.h>
+
+.macro DO_KVM intno
+	.if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
+	    (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
+	    (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
+	    (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
+	    (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+	    (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \
+	    (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
+	    (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
+	    (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
+	    (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
+	    (\intno == BOOK3S_INTERRUPT_TRACE) || \
+	    (\intno == BOOK3S_INTERRUPT_PERFMON) || \
+	    (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
+	    (\intno == BOOK3S_INTERRUPT_VSX)
+
+	b	kvmppc_trampoline_\intno
+kvmppc_resume_\intno:
+
+	.endif
+.endm
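+
+/*
+ * Hypothetical use at an exception vector (surrounding context assumed):
+ *
+ *	. = 0x300
+ *	DO_KVM	BOOK3S_INTERRUPT_DATA_STORAGE
+ *
+ * For the interrupt numbers listed above this emits a branch to
+ * kvmppc_trampoline_<intno> and a kvmppc_resume_<intno> label to
+ * return to; for any other number it emits nothing.
+ */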
+
+#else
+
+.macro DO_KVM intno
+.endm
+
+#endif /* CONFIG_KVM_BOOK3S_HANDLER */
+
+#else  /*__ASSEMBLY__ */
+
+struct kvmppc_vcore;
+
+/* Struct used for coordinating micro-threading (split-core) mode changes */
+struct kvm_split_mode {
+	unsigned long	rpr;
+	unsigned long	pmmar;
+	unsigned long	ldbar;
+	u8		subcore_size;
+	u8		do_nap;
+	u8		napped[MAX_SMT_THREADS];
+	struct kvmppc_vcore *vc[MAX_SUBCORES];
+	/* Bits for changing lpcr on P9 */
+	unsigned long	lpcr_req;
+	unsigned long	lpidr_req;
+	unsigned long	host_lpcr;
+	u32		do_set;
+	u32		do_restore;
+	union {
+		u32	allphases;
+		u8	phase[4];
+	} lpcr_sync;
+};
+
+/*
+ * This struct goes in the PACA on 64-bit processors.  It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu.  It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+	ulong host_r1;
+	ulong host_r2;
+	ulong host_msr;
+	ulong vmhandler;
+	ulong scratch0;
+	ulong scratch1;
+	ulong scratch2;
+	u8 in_guest;
+	u8 restore_hid5;
+	u8 napping;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	u8 hwthread_req;
+	u8 hwthread_state;
+	u8 host_ipi;
+	u8 ptid;		/* thread number within subcore when split */
+	u8 tid;			/* thread number within whole core */
+	u8 fake_suspend;
+	struct kvm_vcpu *kvm_vcpu;
+	struct kvmppc_vcore *kvm_vcore;
+	void __iomem *xics_phys;
+	void __iomem *xive_tima_phys;
+	void __iomem *xive_tima_virt;
+	u32 saved_xirr;
+	u64 dabr;
+	u64 host_mmcr[7];	/* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
+	u32 host_pmc[8];
+	u64 host_purr;
+	u64 host_spurr;
+	u64 host_dscr;
+	u64 dec_expires;
+	struct kvm_split_mode *kvm_split_mode;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	u64 cfar;
+	u64 ppr;
+	u64 host_fscr;
+#endif
+};
+
+struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
+	ulong gpr[14];
+	u32 cr;
+	ulong xer;
+	ulong ctr;
+	ulong lr;
+	ulong pc;
+
+	ulong shadow_srr1;
+	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+	u32     sr[16];			/* Guest SRs */
+
+	struct kvmppc_host_state hstate;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	u8 slb_max;			/* highest used guest slb entry */
+	struct  {
+		u64     esid;
+		u64     vsid;
+	} slb[64];			/* guest SLB */
+	u64 shadow_fscr;
+#endif
+};
+
+#endif /*__ASSEMBLY__ */
+
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL	0
+#define KVM_HWTHREAD_IN_IDLE	1
+#define KVM_HWTHREAD_IN_KVM	2
+
+#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
new file mode 100644
index 0000000..d513e3e
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -0,0 +1,117 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOKE_H__
+#define __ASM_KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+
+/*
+ * Number of available lpids.  Only the low-order 6 bits of the LPID
+ * register are implemented on e500mc+ cores.
+ */
+#define KVMPPC_NR_LPIDS                        64
+
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.regs.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.regs.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.xer = val;
+}
+
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.xer;
+}
+
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	/* XXX Would need to check TLB entry */
+	return false;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.link = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.link;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.regs.nip = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.nip;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dear;
+}
+
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+	/* Magic page is only supported on e500v2 */
+#ifdef CONFIG_KVM_E500V2
+	return true;
+#else
+	return false;
+#endif
+}
+#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 0000000..931260b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#include <asm/feature-fixups.h>
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * 32-bit host
+ * Expected inputs (normal exceptions):
+ *   SCRATCH0 = saved r10
+ *   r10 = thread struct
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   r13 = saved CR
+ *   *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ *   *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ *   appropriate SCRATCH = saved r8
+ *   r8 = exception level stack frame
+ *   r9 = *(r8 + _CCR) = saved CR
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   *(r8 + GPR9) = saved r9
+ *   *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ *   *(r8 + GPR11) = saved r11
+ *
+ * 64-bit host
+ * Expected inputs (GEN/GDBELL/DBG/CRIT/MC exception types):
+ *  r10 = saved CR
+ *  r13 = PACA_POINTER
+ *  *(r13 + PACA_EX##type + EX_R10) = saved r10
+ *  *(r13 + PACA_EX##type + EX_R11) = saved r11
+ *  SPRN_SPRG_##type##_SCRATCH = saved r13
+ *
+ * Expected inputs (TLB exception type):
+ *  r10 = saved CR
+ *  r12 = extlb pointer
+ *  r13 = PACA_POINTER
+ *  *(r12 + EX_TLB_R10) = saved r10
+ *  *(r12 + EX_TLB_R11) = saved r11
+ *  *(r12 + EX_TLB_R13) = saved r13
+ *  SPRN_SPRG_GEN_SCRATCH = saved r12
+ *
+ * Only the bolted version of TLB miss exception handlers is supported now.
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
+	bf	3, 1975f
+	b	kvmppc_handler_\intno\()_\srr1
+1975:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
new file mode 100644
index 0000000..92daae1
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright Novell Inc. 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_FPU_H__
+#define __ASM_KVM_FPU_H__
+
+#include <linux/types.h>
+
+extern void fps_fres(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_frsqrte(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_fsqrts(u64 *fpscr, u32 *dst, u32 *src1);
+
+extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+
+extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+		       u32 *src3);
+extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+		       u32 *src3);
+extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+		        u32 *src3);
+extern void fps_fnmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+		        u32 *src3);
+extern void fps_fsel(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
+		     u32 *src3);
+
+#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+				u64 *dst, u64 *src1);
+#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+				u64 *dst, u64 *src1, u64 *src2);
+#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+				u64 *dst, u64 *src1, u64 *src2, u64 *src3);
+
+extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
+
+extern void kvm_cvt_fd(u32 *from, u64 *to);
+extern void kvm_cvt_df(u64 *from, u32 *to);
+
+#endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
new file mode 100644
index 0000000..906bcbd
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -0,0 +1,833 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <asm/kvm_asm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/hvcall.h>
+#include <asm/mce.h>
+
+#define KVM_MAX_VCPUS		NR_CPUS
+#define KVM_MAX_VCORES		NR_CPUS
+#define KVM_USER_MEM_SLOTS	512
+
+#include <asm/cputhreads.h>
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#include <asm/kvm_book3s_asm.h>		/* for MAX_SMT_THREADS */
+#define KVM_MAX_VCPU_ID		(MAX_SMT_THREADS * KVM_MAX_VCORES)
+
+#else
+#define KVM_MAX_VCPU_ID		KVM_MAX_VCPUS
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
+#define KVM_HALT_POLL_NS_DEFAULT 10000	/* 10 us */
+
+/* These values are internal and can be increased later */
+#define KVM_NR_IRQCHIPS          1
+#define KVM_IRQCHIP_NUM_PINS     256
+
+/* PPC-specific vcpu->requests bit members */
+#define KVM_REQ_WATCHDOG	KVM_ARCH_REQ(0)
+#define KVM_REQ_EPR_EXIT	KVM_ARCH_REQ(1)
+
+#include <linux/mmu_notifier.h>
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+extern int kvm_unmap_hva_range(struct kvm *kvm,
+			       unsigned long start, unsigned long end);
+extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+#define HPTEG_CACHE_NUM			(1 << 15)
+#define HPTEG_HASH_BITS_PTE		13
+#define HPTEG_HASH_BITS_PTE_LONG	12
+#define HPTEG_HASH_BITS_VPTE		13
+#define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
+#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
+#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
+#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
+
+/* Physical Address Mask - allowed range of real mode RAM access */
+#define KVM_PAM			0x0fffffffffffffffULL
+
+struct lppaca;
+struct slb_shadow;
+struct dtl_entry;
+
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
+struct kvm_vm_stat {
+	ulong remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+	u64 sum_exits;
+	u64 mmio_exits;
+	u64 signal_exits;
+	u64 light_exits;
+	/* Account for special types of light exits: */
+	u64 itlb_real_miss_exits;
+	u64 itlb_virt_miss_exits;
+	u64 dtlb_real_miss_exits;
+	u64 dtlb_virt_miss_exits;
+	u64 syscall_exits;
+	u64 isi_exits;
+	u64 dsi_exits;
+	u64 emulated_inst_exits;
+	u64 dec_exits;
+	u64 ext_intr_exits;
+	u64 halt_poll_success_ns;
+	u64 halt_poll_fail_ns;
+	u64 halt_wait_ns;
+	u64 halt_successful_poll;
+	u64 halt_attempted_poll;
+	u64 halt_successful_wait;
+	u64 halt_poll_invalid;
+	u64 halt_wakeup;
+	u64 dbell_exits;
+	u64 gdbell_exits;
+	u64 ld;
+	u64 st;
+#ifdef CONFIG_PPC_BOOK3S
+	u64 pf_storage;
+	u64 pf_instruc;
+	u64 sp_storage;
+	u64 sp_instruc;
+	u64 queue_intr;
+	u64 ld_slow;
+	u64 st_slow;
+#endif
+	u64 pthru_all;
+	u64 pthru_host;
+	u64 pthru_bad_aff;
+};
+
+enum kvm_exit_types {
+	MMIO_EXITS,
+	SIGNAL_EXITS,
+	ITLB_REAL_MISS_EXITS,
+	ITLB_VIRT_MISS_EXITS,
+	DTLB_REAL_MISS_EXITS,
+	DTLB_VIRT_MISS_EXITS,
+	SYSCALL_EXITS,
+	ISI_EXITS,
+	DSI_EXITS,
+	EMULATED_INST_EXITS,
+	EMULATED_MTMSRWE_EXITS,
+	EMULATED_WRTEE_EXITS,
+	EMULATED_MTSPR_EXITS,
+	EMULATED_MFSPR_EXITS,
+	EMULATED_MTMSR_EXITS,
+	EMULATED_MFMSR_EXITS,
+	EMULATED_TLBSX_EXITS,
+	EMULATED_TLBWE_EXITS,
+	EMULATED_RFI_EXITS,
+	EMULATED_RFCI_EXITS,
+	EMULATED_RFDI_EXITS,
+	DEC_EXITS,
+	EXT_INTR_EXITS,
+	HALT_WAKEUP,
+	USR_PR_INST,
+	FP_UNAVAIL,
+	DEBUG_EXITS,
+	TIMEINGUEST,
+	DBELL_EXITS,
+	GDBELL_EXITS,
+	__NUMBER_OF_KVM_EXIT_TYPES
+};
+
+/* Allow access to big-endian 32-bit upper/lower parts and the 64-bit var */
+struct kvmppc_exit_timing {
+	union {
+		u64 tv64;
+		struct {
+			u32 tbu, tbl;
+		} tv32;
+	};
+};
+
+struct kvmppc_pginfo {
+	unsigned long pfn;
+	atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_iommu_table {
+	struct rcu_head rcu;
+	struct list_head next;
+	struct iommu_table *tbl;
+	struct kref kref;
+};
+
+struct kvmppc_spapr_tce_table {
+	struct list_head list;
+	struct kvm *kvm;
+	u64 liobn;
+	struct rcu_head rcu;
+	u32 page_shift;
+	u64 offset;		/* in pages */
+	u64 size;		/* window size in pages */
+	struct list_head iommu_tables;
+	struct page *pages[0];
+};
+
+/* XICS components, defined in book3s_xics.c */
+struct kvmppc_xics;
+struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
+
+struct kvmppc_passthru_irqmap;
+
+/*
+ * The reverse mapping array has one entry for each HPTE,
+ * which stores the guest's view of the second word of the HPTE
+ * (including the guest physical address of the mapping),
+ * plus forward and backward pointers in a doubly-linked ring
+ * of HPTEs that map the same host page.  The pointers in this
+ * ring are 32-bit HPTE indexes, to save space.
+ */
+struct revmap_entry {
+	unsigned long guest_rpte;
+	unsigned int forw, back;
+};
+
+/*
+ * We use the top bit of each memslot->arch.rmap entry as a lock bit,
+ * and bit 32 as a present flag.  The bottom 32 bits are the
+ * index in the guest HPT of a HPTE that points to the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT	63
+#define KVMPPC_RMAP_RC_SHIFT	32
+#define KVMPPC_RMAP_REFERENCED	(HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_PRESENT	0x100000000ul
+#define KVMPPC_RMAP_INDEX	0xfffffffful
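+
+/*
+ * A sketch (names assumed from this header) of walking the revmap ring
+ * for one host page, starting from the HPTE index in a memslot rmap
+ * entry:
+ *
+ *	unsigned long i, head;
+ *
+ *	i = head = *rmap & KVMPPC_RMAP_INDEX;
+ *	do {
+ *		struct revmap_entry *rev = &kvm->arch.hpt.rev[i];
+ *		... process HPTE number i ...
+ *		i = rev->forw;
+ *	} while (i != head);
+ */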
+
+struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	unsigned long *rmap;
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+};
+
+struct kvm_hpt_info {
+	/* Host virtual (linear mapping) address of guest HPT */
+	unsigned long virt;
+	/* Array of reverse mapping entries for each guest HPTE */
+	struct revmap_entry *rev;
+	/* Guest HPT size is 2**(order) bytes */
+	u32 order;
+	/* 1 if HPT allocated with CMA, 0 otherwise */
+	int cma;
+};
+
+struct kvm_resize_hpt;
+
+struct kvm_arch {
+	unsigned int lpid;
+	unsigned int smt_mode;		/* # vcpus per virtual core */
+	unsigned int emul_smt_mode;	/* emulated SMT mode, on P9 */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	unsigned int tlb_sets;
+	struct kvm_hpt_info hpt;
+	atomic64_t mmio_update;
+	unsigned int host_lpid;
+	unsigned long host_lpcr;
+	unsigned long sdr1;
+	unsigned long host_sdr1;
+	unsigned long lpcr;
+	unsigned long vrma_slb_v;
+	int mmu_ready;
+	atomic_t vcpus_running;
+	u32 online_vcores;
+	atomic_t hpte_mod_interest;
+	cpumask_t need_tlb_flush;
+	cpumask_t cpu_in_guest;
+	u8 radix;
+	u8 fwnmi_enabled;
+	bool threads_indep;
+	pgd_t *pgtable;
+	u64 process_table;
+	struct dentry *debugfs_dir;
+	struct dentry *htab_dentry;
+	struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct list_head spapr_tce_tables;
+	struct list_head rtas_tokens;
+	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+#endif
+#ifdef CONFIG_KVM_MPIC
+	struct openpic *mpic;
+#endif
+#ifdef CONFIG_KVM_XICS
+	struct kvmppc_xics *xics;
+	struct kvmppc_xive *xive;
+	struct kvmppc_passthru_irqmap *pimap;
+#endif
+	struct kvmppc_ops *kvm_ops;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* This array can grow quite large, keep it at the end */
+	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif
+};
+
+#define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
+#define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)
+#define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0)
+
+/* This bit is used when a vcore exit is triggered from outside the vcore */
+#define VCORE_EXIT_REQ		0x10000
+
+/*
+ * Values for vcore_state.
+ * Note that these are arranged such that lower values
+ * (< VCORE_SLEEPING) don't require stolen time accounting
+ * on load/unload, and higher values do.
+ */
+#define VCORE_INACTIVE	0
+#define VCORE_PREEMPT	1
+#define VCORE_PIGGYBACK	2
+#define VCORE_SLEEPING	3
+#define VCORE_RUNNING	4
+#define VCORE_EXITING	5
+#define VCORE_POLLING	6
+
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest.  There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+	unsigned long gpa;	/* Current guest phys addr */
+	void *pinned_addr;	/* Address in kernel linear mapping */
+	void *pinned_end;	/* End of region */
+	unsigned long next_gpa;	/* Guest phys addr for update */
+	unsigned long len;	/* Number of bytes required */
+	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
+	bool dirty;		/* true => area has been modified by kernel */
+};
+
+struct kvmppc_pte {
+	ulong eaddr;
+	u64 vpage;
+	ulong raddr;
+	bool may_read		: 1;
+	bool may_write		: 1;
+	bool may_execute	: 1;
+	unsigned long wimg;
+	u8 page_size;		/* MMU_PAGE_xxx */
+};
+
+struct kvmppc_mmu {
+	/* book3s_64 only */
+	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
+	u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	void (*slbia)(struct kvm_vcpu *vcpu);
+	/* book3s */
+	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
+	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
+	void (*reset_msr)(struct kvm_vcpu *vcpu);
+	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
+	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
+	u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
+	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
+};
+
+struct kvmppc_slb {
+	u64 esid;
+	u64 vsid;
+	u64 orige;
+	u64 origv;
+	bool valid	: 1;
+	bool Ks		: 1;
+	bool Kp		: 1;
+	bool nx		: 1;
+	bool large	: 1;	/* PTEs are 16MB */
+	bool tb		: 1;	/* 1TB segment */
+	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
+};
+
+/* Struct used to accumulate timing information in HV real mode code */
+struct kvmhv_tb_accumulator {
+	u64	seqcount;	/* used to synchronize access, also count * 2 */
+	u64	tb_total;	/* total time in timebase ticks */
+	u64	tb_min;		/* min time */
+	u64	tb_max;		/* max time */
+};
+
+#ifdef CONFIG_PPC_BOOK3S_64
+struct kvmppc_irq_map {
+	u32	r_hwirq;
+	u32	v_hwirq;
+	struct irq_desc *desc;
+};
+
+#define	KVMPPC_PIRQ_MAPPED	1024
+struct kvmppc_passthru_irqmap {
+	int n_mapped;
+	struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
+};
+#endif
+
+# ifdef CONFIG_PPC_FSL_BOOK3E
+#define KVMPPC_BOOKE_IAC_NUM	2
+#define KVMPPC_BOOKE_DAC_NUM	2
+# else
+#define KVMPPC_BOOKE_IAC_NUM	4
+#define KVMPPC_BOOKE_DAC_NUM	2
+# endif
+#define KVMPPC_BOOKE_MAX_IAC	4
+#define KVMPPC_BOOKE_MAX_DAC	2
+
+/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
+#define KVMPPC_EPR_NONE		0 /* EPR not supported */
+#define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
+#define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
+
+#define KVMPPC_IRQ_DEFAULT	0
+#define KVMPPC_IRQ_MPIC		1
+#define KVMPPC_IRQ_XICS		2 /* Includes a XIVE option */
+
+#define MMIO_HPTE_CACHE_SIZE	4
+
+struct mmio_hpte_cache_entry {
+	unsigned long hpte_v;
+	unsigned long hpte_r;
+	unsigned long rpte;
+	unsigned long pte_index;
+	unsigned long eaddr;
+	unsigned long slb_v;
+	long mmio_update;
+	unsigned int slb_base_pshift;
+};
+
+struct mmio_hpte_cache {
+	struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
+	unsigned int index;
+};
+
+#define KVMPPC_VSX_COPY_NONE		0
+#define KVMPPC_VSX_COPY_WORD		1
+#define KVMPPC_VSX_COPY_DWORD		2
+#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP	3
+#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP	4
+
+#define KVMPPC_VMX_COPY_BYTE		8
+#define KVMPPC_VMX_COPY_HWORD		9
+#define KVMPPC_VMX_COPY_WORD		10
+#define KVMPPC_VMX_COPY_DWORD		11
+
+struct openpic;
+
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+	struct {
+		u8	nsr;
+		u8	cppr;
+		u8	ipb;
+		u8	lsmfb;
+		u8	ack;
+		u8	inc;
+		u8	age;
+		u8	pipr;
+	};
+	__be64 w01;
+};
+
+struct kvm_vcpu_arch {
+	ulong host_stack;
+	u32 host_pid;
+#ifdef CONFIG_PPC_BOOK3S
+	struct kvmppc_slb slb[64];
+	int slb_max;		/* 1 + index of last valid entry in slb[] */
+	int slb_nr;		/* total number of entries in SLB */
+	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
+#endif
+
+	struct pt_regs regs;
+
+	struct thread_fp_state fp;
+
+#ifdef CONFIG_SPE
+	ulong evr[32];
+	ulong spefscr;
+	ulong host_spefscr;
+	u64 acc;
+#endif
+#ifdef CONFIG_ALTIVEC
+	struct thread_vr_state vr;
+#endif
+
+#ifdef CONFIG_KVM_BOOKE_HV
+	u32 host_mas4;
+	u32 host_mas6;
+	u32 shadow_epcr;
+	u32 shadow_msrp;
+	u32 eplc;
+	u32 epsc;
+	u32 oldpir;
+#endif
+
+#if defined(CONFIG_BOOKE)
+#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
+	u32 epcr;
+#endif
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+	/* For Gekko paired singles */
+	u32 qpr[32];
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+	ulong tar;
+#endif
+
+	u32 cr;
+
+#ifdef CONFIG_PPC_BOOK3S
+	ulong hflags;
+	ulong guest_owned_ext;
+	ulong purr;
+	ulong spurr;
+	ulong ic;
+	ulong dscr;
+	ulong amr;
+	ulong uamor;
+	ulong iamr;
+	u32 ctrl;
+	u32 dabrx;
+	ulong dabr;
+	ulong dawr;
+	ulong dawrx;
+	ulong ciabr;
+	ulong cfar;
+	ulong ppr;
+	u32 pspb;
+	ulong fscr;
+	ulong shadow_fscr;
+	ulong ebbhr;
+	ulong ebbrr;
+	ulong bescr;
+	ulong csigr;
+	ulong tacr;
+	ulong tcscr;
+	ulong acop;
+	ulong wort;
+	ulong tid;
+	ulong psscr;
+	ulong hfscr;
+	ulong shadow_srr1;
+#endif
+	u32 vrsave; /* also USPRG0 */
+	u32 mmucr;
+	/* shadow_msr is unused for BookE HV */
+	ulong shadow_msr;
+	ulong csrr0;
+	ulong csrr1;
+	ulong dsrr0;
+	ulong dsrr1;
+	ulong mcsrr0;
+	ulong mcsrr1;
+	ulong mcsr;
+	ulong dec;
+#ifdef CONFIG_BOOKE
+	u32 decar;
+#endif
+	/* Time base value when we entered the guest */
+	u64 entry_tb;
+	u64 entry_vtb;
+	u64 entry_ic;
+	u32 tcr;
+	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
+	u32 ivor[64];
+	ulong ivpr;
+	u32 pvr;
+
+	u32 shadow_pid;
+	u32 shadow_pid1;
+	u32 pid;
+	u32 swap_pid;
+
+	u32 ccr0;
+	u32 ccr1;
+	u32 dbsr;
+
+	u64 mmcr[5];
+	u32 pmc[8];
+	u32 spmc[2];
+	u64 siar;
+	u64 sdar;
+	u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u64 tfhar;
+	u64 texasr;
+	u64 tfiar;
+	u64 orig_texasr;
+
+	u32 cr_tm;
+	u64 xer_tm;
+	u64 lr_tm;
+	u64 ctr_tm;
+	u64 amr_tm;
+	u64 ppr_tm;
+	u64 dscr_tm;
+	u64 tar_tm;
+
+	ulong gpr_tm[32];
+
+	struct thread_fp_state fp_tm;
+
+	struct thread_vr_state vr_tm;
+	u32 vrsave_tm; /* also USPRG0 */
+#endif
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+	struct mutex exit_timing_lock;
+	struct kvmppc_exit_timing timing_exit;
+	struct kvmppc_exit_timing timing_last_enter;
+	u32 last_exit_type;
+	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_last_exit;
+	struct dentry *debugfs_exit_timing;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S
+	ulong fault_dar;
+	u32 fault_dsisr;
+	unsigned long intr_msr;
+	ulong fault_gpa;	/* guest real address of page fault (POWER9) */
+#endif
+
+#ifdef CONFIG_BOOKE
+	ulong fault_dear;
+	ulong fault_esr;
+	ulong queued_dear;
+	ulong queued_esr;
+	spinlock_t wdt_lock;
+	struct timer_list wdt_timer;
+	u32 tlbcfg[4];
+	u32 tlbps[4];
+	u32 mmucfg;
+	u32 eptcfg;
+	u32 epr;
+	u64 sprg9;
+	u32 pwrmgtcr0;
+	u32 crit_save;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+#endif
+	gpa_t paddr_accessed;
+	gva_t vaddr_accessed;
+	pgd_t *pgdir;
+
+	u16 io_gpr; /* GPR used as IO source/target */
+	u8 mmio_host_swabbed;
+	u8 mmio_sign_extend;
+	/* conversion between single and double precision */
+	u8 mmio_sp64_extend;
+	/*
+	 * Number of emulated accesses needed for a VSX instruction.
+	 * If we use 2*8 bytes to emulate 1*16 bytes, the number is 2
+	 * and mmio_copy_type = KVMPPC_VSX_COPY_DWORD.
+	 * If we use 4*4 bytes to emulate 1*16 bytes, the number is 4
+	 * and mmio_copy_type = KVMPPC_VSX_COPY_WORD.
+	 */
+	u8 mmio_vsx_copy_nums;
+	u8 mmio_vsx_offset;
+	u8 mmio_vmx_copy_nums;
+	u8 mmio_vmx_offset;
+	u8 mmio_copy_type;
+	u8 osi_needed;
+	u8 osi_enabled;
+	u8 papr_enabled;
+	u8 watchdog_enabled;
+	u8 sane;
+	u8 cpu_type;
+	u8 hcall_needed;
+	u8 epr_flags; /* KVMPPC_EPR_xxx */
+	u8 epr_needed;
+
+	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+	struct hrtimer dec_timer;
+	u64 dec_jiffies;
+	u64 dec_expires;
+	unsigned long pending_exceptions;
+	u8 ceded;
+	u8 prodded;
+	u8 doorbell_request;
+	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
+	u32 last_inst;
+
+	struct swait_queue_head *wqp;
+	struct kvmppc_vcore *vcore;
+	int ret;
+	int trap;
+	int state;
+	int ptid;
+	int thread_cpu;
+	int prev_cpu;
+	bool timer_running;
+	wait_queue_head_t cpu_run;
+	struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
+
+	struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	bool shared_big_endian;
+#endif
+	unsigned long magic_page_pa; /* phys addr to map the magic page to */
+	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+	bool disable_kernel_nx;
+
+	int irq_type;		/* one of KVM_IRQ_* */
+	int irq_cpu_id;
+	struct openpic *mpic;	/* KVM_IRQ_MPIC */
+#ifdef CONFIG_KVM_XICS
+	struct kvmppc_icp *icp; /* XICS presentation controller */
+	struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+	__be32 xive_cam_word;    /* Cooked W2 in proper endian with valid bit */
+	u8 xive_pushed;		 /* Is the VP pushed on the physical CPU ? */
+	u8 xive_esc_on;		 /* Is the escalation irq enabled ? */
+	union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
+	u64 xive_esc_raddr;	 /* Escalation interrupt ESB real addr */
+	u64 xive_esc_vaddr;	 /* Escalation interrupt ESB virt addr */
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct kvm_vcpu_arch_shared shregs;
+
+	struct mmio_hpte_cache mmio_cache;
+	unsigned long pgfault_addr;
+	long pgfault_index;
+	unsigned long pgfault_hpte[2];
+	struct mmio_hpte_cache_entry *pgfault_cache;
+
+	struct task_struct *run_task;
+	struct kvm_run *kvm_run;
+
+	spinlock_t vpa_update_lock;
+	struct kvmppc_vpa vpa;
+	struct kvmppc_vpa dtl;
+	struct dtl_entry *dtl_ptr;
+	unsigned long dtl_index;
+	u64 stolen_logged;
+	struct kvmppc_vpa slb_shadow;
+
+	spinlock_t tbacct_lock;
+	u64 busy_stolen;
+	u64 busy_preempt;
+
+	u32 emul_inst;
+
+	u32 online;
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+	struct kvmhv_tb_accumulator *cur_activity;	/* What we're timing */
+	u64	cur_tb_start;			/* when it started */
+	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
+	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
+	struct kvmhv_tb_accumulator rm_exit;	/* real-mode exit code */
+	struct kvmhv_tb_accumulator guest_time;	/* guest execution */
+	struct kvmhv_tb_accumulator cede_time;	/* time napping inside guest */
+
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_timings;
+#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
+};
+
+#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+#define VCPU_VSX_FPR(vcpu, i, j)	((vcpu)->arch.fp.fpr[i][j])
+#define VCPU_VSX_VR(vcpu, i)		((vcpu)->arch.vr.vr[i])
+
+/* Values for vcpu->arch.state */
+#define KVMPPC_VCPU_NOTREADY		0
+#define KVMPPC_VCPU_RUNNABLE		1
+#define KVMPPC_VCPU_BUSY_IN_HOST	2
+
+/* Values for vcpu->arch.io_gpr */
+#define KVM_MMIO_REG_MASK	0x003f
+#define KVM_MMIO_REG_EXT_MASK	0xffc0
+#define KVM_MMIO_REG_GPR	0x0000
+#define KVM_MMIO_REG_FPR	0x0040
+#define KVM_MMIO_REG_QPR	0x0080
+#define KVM_MMIO_REG_FQPR	0x00c0
+#define KVM_MMIO_REG_VSX	0x0100
+#define KVM_MMIO_REG_VMX	0x0180
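+
+/*
+ * Illustrative encoding example: io_gpr packs the register class into
+ * the EXT bits and the register number into the low bits, so
+ * (KVM_MMIO_REG_FPR | 5) names FPR5 and the index is recovered with
+ * (io_gpr & KVM_MMIO_REG_MASK).
+ */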
+
+#define __KVM_HAVE_ARCH_WQP
+#define __KVM_HAVE_CREATE_DEVICE
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
new file mode 100644
index 0000000..5ceb4ef
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -0,0 +1,74 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#include <uapi/asm/kvm_para.h>
+
+#ifdef CONFIG_KVM_GUEST
+
+#include <linux/of.h>
+
+static inline int kvm_para_available(void)
+{
+	struct device_node *hyper_node;
+	int ret = 0;
+
+	hyper_node = of_find_node_by_path("/hypervisor");
+	if (!hyper_node)
+		return 0;
+
+	if (of_device_is_compatible(hyper_node, "linux,kvm"))
+		ret = 1;
+
+	/* of_find_node_by_path() took a reference; drop it */
+	of_node_put(hyper_node);
+
+	return ret;
+}
+
+#else
+
+static inline int kvm_para_available(void)
+{
+	return 0;
+}
+
+#endif
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	unsigned long r;
+
+	if (!kvm_para_available())
+		return 0;
+
+	if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
+		return 0;
+
+	return r;
+}
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
new file mode 100644
index 0000000..e991821
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -0,0 +1,909 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/*
+ * This file exists just so we can dereference kvm_vcpu, avoiding nested
+ * header dependencies.
+ */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+#include <linux/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/kvm_book3s.h>
+#else
+#include <asm/kvm_booke.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#endif
+
+/*
+ * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
+ * implement software breakpoints.
+ */
+#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
+
+enum emulation_result {
+	EMULATE_DONE,         /* no further processing */
+	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
+	EMULATE_FAIL,         /* can't emulate this instruction */
+	EMULATE_AGAIN,        /* something went wrong; retry the emulation */
+	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
+};
+
+enum instruction_fetch_type {
+	INST_GENERIC,
+	INST_SC,		/* system call */
+};
+
+enum xlate_instdata {
+	XLATE_INST,		/* translate instruction address */
+	XLATE_DATA		/* translate data address */
+};
+
+enum xlate_readwrite {
+	XLATE_READ,		/* check for read permissions */
+	XLATE_WRITE		/* check for write permissions */
+};
+
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void kvmppc_handler_highmem(void);
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                              unsigned int rt, unsigned int bytes,
+			      int is_default_endian);
+extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                               unsigned int rt, unsigned int bytes,
+			       int is_default_endian);
+extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				unsigned int rt, unsigned int bytes,
+			int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			       u64 val, unsigned int bytes,
+			       int is_default_endian);
+extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				int rs, unsigned int bytes,
+				int is_default_endian);
+
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+				 enum instruction_fetch_type type, u32 *inst);
+
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+		     bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+		     bool data);
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+                                      struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
+extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
+extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
+extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+/* Core-specific hooks */
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+                           unsigned int gtlb_idx);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+                              gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
+			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
+			struct kvmppc_pte *pte);
+
+extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+                                                unsigned int id);
+extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_processor_compat(void);
+extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+                                      struct kvm_translation *tr);
+
+extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+                                       struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+					ulong esr_flags);
+extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+					   ulong dear_flags,
+					   ulong esr_flags);
+extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+					   ulong esr_flags);
+extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_booke_init(void);
+extern void kvmppc_booke_exit(void);
+
+extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
+extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
+extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
+extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
+extern void kvmppc_rmap_reset(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
+			struct kvm_memory_slot *memslot, unsigned long porder);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+		struct iommu_group *grp);
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+		struct iommu_group *grp);
+extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
+extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
+
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+				struct kvm_create_spapr_tce_64 *args);
+extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
+		struct kvm *kvm, unsigned long liobn);
+#define kvmppc_ioba_validate(stt, ioba, npages)                         \
+		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+				(stt)->size, (ioba), (npages)) ?        \
+				H_PARAMETER : H_SUCCESS)
+extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
+		unsigned long tce);
+extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+		unsigned long *ua, unsigned long **prmap);
+extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
+		unsigned long idx, unsigned long tce);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_list, unsigned long npages);
+extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_value, unsigned long npages);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba);
+extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
+extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
+				     struct kvm_memory_slot *dont);
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
+				      unsigned long npages);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				const struct kvm_userspace_memory_region *mem);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				const struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+				      struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
+
+extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
+
+extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
+					    struct kvm_ppc_resize_hpt *rhpt);
+extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
+					   struct kvm_ppc_resize_hpt *rhpt);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
+extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
+extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
+extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
+				u32 priority);
+extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+				u32 *priority);
+extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+
+void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
+void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
+
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	u32	vsx32val[4];
+	u16	vsx16val[8];
+	u8	vsx8val[16];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     const struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     const struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old,
+				     const struct kvm_memory_slot *new);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+	int (*hcall_implemented)(unsigned long hcall);
+	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
+				       struct irq_bypass_producer *);
+	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
+					struct irq_bypass_producer *);
+	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
+	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
+			    unsigned long flags);
+	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+				enum instruction_fetch_type type, u32 *inst)
+{
+	int ret = EMULATE_DONE;
+	u32 fetched_inst;
+
+	/*
+	 * Load the instruction manually if the exit path failed to
+	 * fetch it.
+	 */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+	/* Write fetch_failed unswapped if the fetch failed */
+	if (ret == EMULATE_DONE)
+		fetched_inst = kvmppc_need_byteswap(vcpu) ?
+				swab32(vcpu->arch.last_inst) :
+				vcpu->arch.last_inst;
+	else
+		fetched_inst = vcpu->arch.last_inst;
+
+	*inst = fetched_inst;
+	return ret;
+}
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
+extern int kvmppc_hwrng_present(void);
+
+/*
+ * Extracts instruction bits msb..lsb using the architected (IBM) bit
+ * ordering, i.e. the leftmost (most significant) bit is bit 0.  Both
+ * endpoint bits are included.
+ */
+static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
+{
+	u32 r;
+	u32 mask;
+
+	BUG_ON(msb > lsb);
+
+	mask = (1 << (lsb - msb + 1)) - 1;
+	r = (inst >> (63 - lsb)) & mask;
+
+	return r;
+}
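+
+/*
+ * Illustrative example (editor's sketch): with a 32-bit instruction in
+ * the low word of 'inst', architected instruction bit i sits at 64-bit
+ * position i + 32, so a field such as bits 6..10 is extracted with
+ * kvmppc_get_field(inst, 6 + 32, 10 + 32).
+ */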
+
+/*
+ * Replaces instruction bits msb..lsb, using the same architected
+ * (IBM) bit ordering.
+ */
+static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
+{
+	u32 r;
+	u32 mask;
+
+	BUG_ON(msb > lsb);
+
+	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
+	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
+
+	return r;
+}
+
+#define one_reg_size(id)	\
+	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+#define get_reg_val(id, reg)	({		\
+	union kvmppc_one_reg __u;		\
+	switch (one_reg_size(id)) {		\
+	case 4: __u.wval = (reg); break;	\
+	case 8: __u.dval = (reg); break;	\
+	default: BUG();				\
+	}					\
+	__u;					\
+})
+
+
+#define set_reg_val(id, val)	({		\
+	u64 __v;				\
+	switch (one_reg_size(id)) {		\
+	case 4: __v = (val).wval; break;	\
+	case 8: __v = (val).dval; break;	\
+	default: BUG();				\
+	}					\
+	__v;					\
+})
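+
+/*
+ * Illustrative usage (editor's sketch): a ONE_REG handler can let the
+ * register id pick the copy size, e.g.
+ *
+ *	union kvmppc_one_reg val = get_reg_val(id, vcpu->arch.tar);
+ *	u64 raw = set_reg_val(id, val);
+ *
+ * where get_reg_val() packs a 4- or 8-byte register and set_reg_val()
+ * unpacks it again according to one_reg_size(id).
+ */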
+
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+struct openpic;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+extern void kvm_cma_reserve(void) __init;
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
+}
+
+static inline void kvmppc_set_xive_tima(int cpu,
+					unsigned long phys_addr,
+					void __iomem *virt_addr)
+{
+	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
+}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	u32 xirr;
+
+	xirr = get_paca()->kvm_hstate.saved_xirr;
+	get_paca()->kvm_hstate.saved_xirr = 0;
+	return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
+}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
+
+extern void kvm_hv_vm_activated(void);
+extern void kvm_hv_vm_deactivated(void);
+extern bool kvm_hv_mode_active(void);
+
+#else
+static inline void __init kvm_cma_reserve(void)
+{}
+
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline void kvmppc_set_xive_tima(int cpu,
+					unsigned long phys_addr,
+					void __iomem *virt_addr)
+{}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	kvm_vcpu_kick(vcpu);
+}
+
+static inline bool kvm_hv_mode_active(void)		{ return false; }
+
+#endif
+
+#ifdef CONFIG_KVM_XICS
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
+}
+
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+				struct kvm *kvm)
+{
+	if (kvm && kvm_irq_bypass)
+		return kvm->arch.pimap;
+	return NULL;
+}
+
+extern void kvmppc_alloc_host_rm_ops(void);
+extern void kvmppc_free_host_rm_ops(void);
+extern void kvmppc_free_pimap(struct kvm *kvm);
+extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
+extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
+			struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xics_ipi_action(void);
+extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+				   unsigned long host_irq);
+extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+				   unsigned long host_irq);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
+					struct kvmppc_irq_map *irq_map,
+					struct kvmppc_passthru_irqmap *pimap,
+					bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+			       int level, bool line_status);
+
+extern int h_ipi_redirect;
+#else
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+				struct kvm *kvm)
+	{ return NULL; }
+static inline void kvmppc_alloc_host_rm_ops(void) {}
+static inline void kvmppc_free_host_rm_ops(void) {}
+static inline void kvmppc_free_pimap(struct kvm *kvm) {}
+static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+	{ return 0; }
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+	{ return 0; }
+static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+	{ return 0; }
+#endif
+
+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below, the first "xive" is the "eXternal Interrupt Virtualization
+ * Engine", i.e. the new P9 interrupt controller, while the second
+ * "xive" is the legacy "eXternal Interrupt Vector Entry", the
+ * configuration of an interrupt on the "xics" interrupt controller of
+ * P8 and earlier.  These two functions consume or produce a legacy
+ * "XIVE" state for the new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+				u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+				u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+				    struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+				  struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+				  struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+			       int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+				       u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+				       u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+				      int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
+/*
+ * Prototypes for functions called only from assembler code.
+ * Having prototypes reduces sparse errors.
+ */
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			 unsigned long ioba, unsigned long tce);
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+				  unsigned long liobn, unsigned long ioba,
+				  unsigned long tce_list, unsigned long npages);
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+			   unsigned long liobn, unsigned long ioba,
+			   unsigned long tce_value, unsigned long npages);
+long kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+                        unsigned int yield_count);
+long kvmppc_h_random(struct kvm_vcpu *vcpu);
+void kvmhv_commence_exit(int trap);
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_subcore_enter_guest(void);
+void kvmppc_subcore_exit_guest(void);
+long kvmppc_realmode_hmi_handler(void);
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+                    long pte_index, unsigned long pteh, unsigned long ptel);
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+                     unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+                      unsigned long pte_index, unsigned long avpn,
+                      unsigned long va);
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+                   unsigned long pte_index);
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+                        unsigned long pte_index);
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+                        unsigned long pte_index);
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+                          unsigned long slb_v, unsigned int status, bool data);
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+                    unsigned long mfrr);
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+/*
+ * Host-side operations we want to set up while running in real
+ * mode in the guest operating on the xics.
+ * Currently only VCPU wakeup is supported.
+ */
+
+union kvmppc_rm_state {
+	unsigned long raw;
+	struct {
+		u32 in_host;
+		u32 rm_action;
+	};
+};
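+
+/*
+ * Illustrative note (editor's sketch): keeping in_host and rm_action
+ * inside a single unsigned long lets real-mode code update both fields
+ * atomically via the 'raw' view, e.g.
+ *
+ *	old.raw = READ_ONCE(rm_core->rm_state.raw);
+ *	new.raw = old.raw;
+ *	new.rm_action = action;
+ *	cmpxchg(&rm_core->rm_state.raw, old.raw, new.raw);
+ */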
+
+struct kvmppc_host_rm_core {
+	union kvmppc_rm_state rm_state;
+	void *rm_data;
+	char pad[112];
+};
+
+struct kvmppc_host_rm_ops {
+	struct kvmppc_host_rm_core	*rm_core;
+	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
+};
+
+extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+
+static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GEPR);
+#elif defined(CONFIG_BOOKE)
+	return vcpu->arch.epr;
+#else
+	return 0;
+#endif
+}
+
+static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GEPR, epr);
+#elif defined(CONFIG_BOOKE)
+	vcpu->arch.epr = epr;
+#endif
+}
+
+#ifdef CONFIG_KVM_MPIC
+
+void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
+int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+			     u32 cpu);
+void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
+
+#else
+
+static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
+		struct kvm_vcpu *vcpu, u32 cpu)
+{
+	return -EINVAL;
+}
+
+static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
+		struct kvm_vcpu *vcpu)
+{
+}
+
+#endif /* CONFIG_KVM_MPIC */
+
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+			      struct kvm_config_tlb *cfg);
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+			     struct kvm_dirty_tlb *cfg);
+
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
+static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
+{
+	struct page *page;
+	/*
+	 * We can only access pages that the kernel maps
+	 * as memory. Bail out for unmapped ones.
+	 */
+	if (!pfn_valid(pfn))
+		return;
+
+	/* Clear i-cache for new pages */
+	page = pfn_to_page(pfn);
+	if (!test_bit(PG_arch_1, &page->flags)) {
+		flush_dcache_icache_page(page);
+		set_bit(PG_arch_1, &page->flags);
+	}
+}
+
+/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness. So expose helpers to all of them.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	/* Only Book3S_64 PR supports bi-endian for now */
+	return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+	/* Book3s_64 HV on little endian is always little endian */
+	return false;
+#else
+	return true;
+#endif
+}
+
+#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
+static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
+{									\
+	return mfspr(bookehv_spr);					\
+}									\
+
+#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
+{									\
+	mtspr(bookehv_spr, val);						\
+}									\
+
+#define SHARED_WRAPPER_GET(reg, size)					\
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
+	else								\
+	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
+}									\
+
+#define SHARED_WRAPPER_SET(reg, size)					\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
+	else								\
+	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
+}									\
+
+#define SHARED_WRAPPER(reg, size)					\
+	SHARED_WRAPPER_GET(reg, size)					\
+	SHARED_WRAPPER_SET(reg, size)					\
+
+#define SPRNG_WRAPPER(reg, bookehv_spr)					\
+	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
+	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
+
+#ifdef CONFIG_KVM_BOOKE_HV
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
+	SPRNG_WRAPPER(reg, bookehv_spr)					\
+
+#else
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
+	SHARED_WRAPPER(reg, size)					\
+
+#endif
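+
+/*
+ * Illustrative expansion: SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+ * below generates kvmppc_get_sprg0()/kvmppc_set_sprg0(), which access
+ * the guest SPR directly on BookE HV and the endian-aware field in the
+ * shared page everywhere else.
+ */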
+
+SHARED_WRAPPER(critical, 64)
+SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
+SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
+SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
+SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
+SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
+SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
+SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+	       vcpu->arch.shared->msr = cpu_to_be64(val);
+	else
+	       vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+	else
+	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+	else
+	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
+ * Call this after kvmppc_prepare_to_enter().  It puts the lazy-EE and
+ * IRQ-disabled tracking state back to normal, without actually
+ * enabling interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
+{
+	trace_hardirqs_on();
+
+#ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
+	/* Only need to enable IRQs by hard enabling them after this */
+	local_paca->irq_happened = 0;
+	irq_soft_mask_set(IRQS_ENABLED);
+#endif
+}
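+
+/*
+ * Illustrative caller pattern (editor's sketch): the entry path is
+ * expected to hard-disable interrupts first and call this only once it
+ * has committed to entering the guest, e.g.
+ *
+ *	hard_irq_disable();
+ *	...final checks...
+ *	kvmppc_fix_ee_before_entry();
+ */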
+
+static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
+{
+	ulong ea;
+	ulong msr_64bit = 0;
+
+	ea = kvmppc_get_gpr(vcpu, rb);
+	if (ra)
+		ea += kvmppc_get_gpr(vcpu, ra);
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+	msr_64bit = MSR_CM;
+#elif defined(CONFIG_PPC_BOOK3S_64)
+	msr_64bit = MSR_SF;
+#endif
+
+	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
+		ea = (uint32_t)ea;
+
+	return ea;
+}
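+
+/*
+ * Illustrative example (editor's sketch): for an indexed load such as
+ * lwzx RT,RA,RB the effective address is GPR(RB), plus GPR(RA) when
+ * RA != 0; in 32-bit mode (msr_64bit bit clear) the result is
+ * truncated to 32 bits, as above.
+ */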
+
+extern void xics_wake_cpu(int cpu);
+
+#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/libata-portmap.h b/arch/powerpc/include/asm/libata-portmap.h
new file mode 100644
index 0000000..7c602da
--- /dev/null
+++ b/arch/powerpc/include/asm/libata-portmap.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
+#define __ASM_POWERPC_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_IRQ(dev)	pci_get_legacy_ide_irq(dev, 0)
+
+#define ATA_SECONDARY_IRQ(dev)	pci_get_legacy_ide_irq(dev, 1)
+
+#endif
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 0000000..1f00d28
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_LINKAGE_H
+#define _ASM_POWERPC_LINKAGE_H
+
+#include <asm/types.h>
+
+#ifdef PPC64_ELF_ABI_v1
+#define cond_syscall(x) \
+	asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n"		\
+	     "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
+#define SYSCALL_ALIAS(alias, name)					\
+	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"	\
+	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#endif
+
+#endif	/* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 0000000..47a03b9
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,55 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+	return 0;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+}
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	/*
+	 * Live patch works only with -mprofile-kernel on PPC. In this case,
+	 * the ftrace location is always within the first 16 bytes.
+	 */
+	return ftrace_location_range(faddr, faddr + 16);
+}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+	/* + 1 to account for STACK_END_MAGIC */
+	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static inline void klp_init_thread_info(struct thread_info *ti) { }
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
new file mode 100644
index 0000000..fdd0093
--- /dev/null
+++ b/arch/powerpc/include/asm/local.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_LOCAL_H
+#define _ARCH_POWERPC_LOCAL_H
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+#include <linux/irqflags.h>
+
+#include <asm/hw_irq.h>
+
+typedef struct
+{
+	long v;
+} local_t;
+
+#define LOCAL_INIT(i)	{ (i) }
+
+static __inline__ long local_read(local_t *l)
+{
+	return READ_ONCE(l->v);
+}
+
+static __inline__ void local_set(local_t *l, long i)
+{
+	WRITE_ONCE(l->v, i);
+}
+
+#define LOCAL_OP(op, c_op)						\
+static __inline__ void local_##op(long i, local_t *l)			\
+{									\
+	unsigned long flags;						\
+									\
+	powerpc_local_irq_pmu_save(flags);				\
+	l->v c_op i;						\
+	powerpc_local_irq_pmu_restore(flags);				\
+}
+
+#define LOCAL_OP_RETURN(op, c_op)					\
+static __inline__ long local_##op##_return(long a, local_t *l)		\
+{									\
+	long t;								\
+	unsigned long flags;						\
+									\
+	powerpc_local_irq_pmu_save(flags);				\
+	t = (l->v c_op a);						\
+	powerpc_local_irq_pmu_restore(flags);				\
+									\
+	return t;							\
+}
+
+#define LOCAL_OPS(op, c_op)		\
+	LOCAL_OP(op, c_op)		\
+	LOCAL_OP_RETURN(op, c_op)
+
+LOCAL_OPS(add, +=)
+LOCAL_OPS(sub, -=)
+
+#define local_add_negative(a, l)	(local_add_return((a), (l)) < 0)
+#define local_inc_return(l)		local_add_return(1LL, l)
+#define local_inc(l)			local_inc_return(l)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l)		(local_inc_return(l) == 0)
+
+#define local_dec_return(l)		local_sub_return(1LL, l)
+#define local_dec(l)			local_dec_return(l)
+#define local_sub_and_test(a, l)	(local_sub_return((a), (l)) == 0)
+#define local_dec_and_test(l)		(local_dec_return((l)) == 0)
+
+static __inline__ long local_cmpxchg(local_t *l, long o, long n)
+{
+	long t;
+	unsigned long flags;
+
+	powerpc_local_irq_pmu_save(flags);
+	t = l->v;
+	if (t == o)
+		l->v = n;
+	powerpc_local_irq_pmu_restore(flags);
+
+	return t;
+}
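+
+/*
+ * Illustrative usage (editor's sketch): the usual compare-and-swap
+ * loop works as it would with atomic_t, e.g.
+ *
+ *	do {
+ *		old = local_read(&l);
+ *	} while (local_cmpxchg(&l, old, old + 1) != old);
+ *
+ * but local_t is only safe against this CPU's own interrupts and PMU,
+ * not against other CPUs.
+ */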
+
+static __inline__ long local_xchg(local_t *l, long n)
+{
+	long t;
+	unsigned long flags;
+
+	powerpc_local_irq_pmu_save(flags);
+	t = l->v;
+	l->v = n;
+	powerpc_local_irq_pmu_restore(flags);
+
+	return t;
+}
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static __inline__ int local_add_unless(local_t *l, long a, long u)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	powerpc_local_irq_pmu_save(flags);
+	if (l->v != u) {
+		l->v += a;
+		ret = 1;
+	}
+	powerpc_local_irq_pmu_restore(flags);
+
+	return ret;
+}
+
+#define local_inc_not_zero(l)		local_add_unless((l), 1, 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations.  Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l)		((l)->v++)
+#define __local_dec(l)		((l)->v--)
+#define __local_add(i,l)	((l)->v+=(i))
+#define __local_sub(i,l)	((l)->v-=(i))
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+
+#include <asm-generic/local.h>
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
new file mode 100644
index 0000000..7c23ce8
--- /dev/null
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -0,0 +1,169 @@
+/*
+ * lppaca.h
+ * Copyright (C) 2001  Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_LPPACA_H
+#define _ASM_POWERPC_LPPACA_H
+#ifdef __KERNEL__
+
+/*
+ * These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor and the OS.
+ */
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+#include <asm/firmware.h>
+
+/*
+ * The lppaca is the "virtual processor area" registered with the hypervisor,
+ * H_REGISTER_VPA etc.
+ *
+ * According to PAPR, the structure is 640 bytes long, must be L1 cache line
+ * aligned, and must not cross a 4kB boundary. Its size field must be at
+ * least 640 bytes (but may be more).
+ *
+ * Pre-v4.14 KVM hypervisors reject the VPA if its size field is smaller than
+ * 1kB, so we dynamically allocate 1kB and advertise size as 1kB, but keep
+ * this structure as the canonical 640 byte size.
+ */
+struct lppaca {
+	/* cacheline 1 contains read-only data */
+
+	__be32	desc;			/* Eye catcher 0xD397D781 */
+	__be16	size;			/* Size of this struct */
+	u8	reserved1[3];
+	u8	__old_status;		/* Old status, including shared proc */
+	u8	reserved3[14];
+	volatile __be32 dyn_hw_node_id;	/* Dynamic hardware node id */
+	volatile __be32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
+	u8	reserved4[56];
+	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
+					  /* associativity change counters */
+	u8	reserved5[32];
+
+	/* cacheline 2 contains local read-write data */
+
+	u8	reserved6[48];
+	u8	cede_latency_hint;
+	u8	ebb_regs_in_use;
+	u8	reserved7[6];
+	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
+	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
+	u8	fpregs_in_use;
+	u8	pmcregs_in_use;
+	u8	reserved8[28];
+	__be64	wait_state_cycles;	/* Wait cycles for this proc */
+	u8	reserved9[28];
+	__be16	slb_count;		/* # of SLBs to maintain */
+	u8	idle;			/* Indicate OS is idle */
+	u8	vmxregs_in_use;
+
+	/* cacheline 3 is shared with other processors */
+
+	/*
+	 * This is the yield_count.  An "odd" value (low bit on) means that
+	 * the processor is yielded (either because of an OS yield or a
+	 * hypervisor preempt).  An even value implies that the processor is
+	 * currently executing.
+	 * NOTE: Even dedicated processor partitions can yield so this
+	 * field cannot be used to determine if we are shared or dedicated.
+	 */
+	volatile __be32 yield_count;
+	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+	volatile __be64 cmo_faults;	/* CMO page fault count */
+	volatile __be64 cmo_fault_time;	/* CMO page fault time */
+	u8	reserved10[104];
+
+	/* cacheline 4-5 */
+
+	__be32	page_ins;		/* CMO Hint - # page ins by OS */
+	u8	reserved11[148];
+	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
+	u8	reserved12[96];
+} ____cacheline_aligned;
+
+#define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
+
+/*
+ * We are using a non architected field to determine if a partition is
+ * shared or dedicated. This currently works on both KVM and PHYP, but
+ * we will have to transition to something better.
+ */
+#define LPPACA_OLD_SHARED_PROC		2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+		return false;
+	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+}
+
+/*
+ * SLB shadow buffer structure as defined in the PAPR.  The save_area
+ * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
+ * ESID is stored in the first 64 bits of each pair, followed by the VSID.
+ */
+struct slb_shadow {
+	__be32	persistent;		/* Number of persistent SLBs */
+	__be32	buffer_length;		/* Total shadow buffer length */
+	__be64	reserved;
+	struct	{
+		__be64     esid;
+		__be64	vsid;
+	} save_area[SLB_NUM_BOLTED];
+} ____cacheline_aligned;
+
+/*
+ * Layout of entries in the hypervisor's dispatch trace log buffer.
+ */
+struct dtl_entry {
+	u8	dispatch_reason;
+	u8	preempt_reason;
+	__be16	processor_id;
+	__be32	enqueue_to_dispatch_time;
+	__be32	ready_to_enqueue_time;
+	__be32	waiting_to_ready_time;
+	__be64	timebase;
+	__be64	fault_addr;
+	__be64	srr0;
+	__be64	srr1;
+};
+
+#define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
+#define N_DISPATCH_LOG		(DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
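+
+/*
+ * Illustrative arithmetic: struct dtl_entry is 48 bytes, so
+ * N_DISPATCH_LOG comes out to 4096 / 48 = 85 entries per cpu.
+ */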
+
+extern struct kmem_cache *dtl_cache;
+
+/*
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
+ * reading from the dispatch trace log.  If other code wants to consume
+ * DTL entries, it can set this pointer to a function that will get
+ * called once for each DTL entry that gets processed.
+ */
+extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
+
+#endif /* CONFIG_PPC_BOOK3S */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
new file mode 100644
index 0000000..f511767
--- /dev/null
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -0,0 +1,349 @@
+/*
+ *  PS3 hvcall interface.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *  Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_LV1CALL_H)
+#define _ASM_POWERPC_LV1CALL_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+#include <linux/export.h>
+
+/* lv1 call declaration macros */
+
+#define LV1_1_IN_ARG_DECL u64 in_1
+#define LV1_2_IN_ARG_DECL LV1_1_IN_ARG_DECL, u64 in_2
+#define LV1_3_IN_ARG_DECL LV1_2_IN_ARG_DECL, u64 in_3
+#define LV1_4_IN_ARG_DECL LV1_3_IN_ARG_DECL, u64 in_4
+#define LV1_5_IN_ARG_DECL LV1_4_IN_ARG_DECL, u64 in_5
+#define LV1_6_IN_ARG_DECL LV1_5_IN_ARG_DECL, u64 in_6
+#define LV1_7_IN_ARG_DECL LV1_6_IN_ARG_DECL, u64 in_7
+#define LV1_8_IN_ARG_DECL LV1_7_IN_ARG_DECL, u64 in_8
+#define LV1_1_OUT_ARG_DECL u64 *out_1
+#define LV1_2_OUT_ARG_DECL LV1_1_OUT_ARG_DECL, u64 *out_2
+#define LV1_3_OUT_ARG_DECL LV1_2_OUT_ARG_DECL, u64 *out_3
+#define LV1_4_OUT_ARG_DECL LV1_3_OUT_ARG_DECL, u64 *out_4
+#define LV1_5_OUT_ARG_DECL LV1_4_OUT_ARG_DECL, u64 *out_5
+#define LV1_6_OUT_ARG_DECL LV1_5_OUT_ARG_DECL, u64 *out_6
+#define LV1_7_OUT_ARG_DECL LV1_6_OUT_ARG_DECL, u64 *out_7
+
+#define LV1_0_IN_0_OUT_ARG_DECL void
+#define LV1_1_IN_0_OUT_ARG_DECL LV1_1_IN_ARG_DECL
+#define LV1_2_IN_0_OUT_ARG_DECL LV1_2_IN_ARG_DECL
+#define LV1_3_IN_0_OUT_ARG_DECL LV1_3_IN_ARG_DECL
+#define LV1_4_IN_0_OUT_ARG_DECL LV1_4_IN_ARG_DECL
+#define LV1_5_IN_0_OUT_ARG_DECL LV1_5_IN_ARG_DECL
+#define LV1_6_IN_0_OUT_ARG_DECL LV1_6_IN_ARG_DECL
+#define LV1_7_IN_0_OUT_ARG_DECL LV1_7_IN_ARG_DECL
+
+#define LV1_0_IN_1_OUT_ARG_DECL                    LV1_1_OUT_ARG_DECL
+#define LV1_1_IN_1_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_2_IN_1_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_3_IN_1_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_4_IN_1_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_5_IN_1_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_6_IN_1_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_7_IN_1_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_8_IN_1_OUT_ARG_DECL LV1_8_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+
+#define LV1_0_IN_2_OUT_ARG_DECL                    LV1_2_OUT_ARG_DECL
+#define LV1_1_IN_2_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_2_IN_2_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_3_IN_2_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_4_IN_2_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_5_IN_2_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_6_IN_2_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_7_IN_2_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+
+#define LV1_0_IN_3_OUT_ARG_DECL                    LV1_3_OUT_ARG_DECL
+#define LV1_1_IN_3_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_2_IN_3_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_3_IN_3_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_4_IN_3_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_5_IN_3_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_6_IN_3_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_7_IN_3_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+
+#define LV1_0_IN_4_OUT_ARG_DECL                    LV1_4_OUT_ARG_DECL
+#define LV1_1_IN_4_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_2_IN_4_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_3_IN_4_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_4_IN_4_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_5_IN_4_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_6_IN_4_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_7_IN_4_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+
+#define LV1_0_IN_5_OUT_ARG_DECL                    LV1_5_OUT_ARG_DECL
+#define LV1_1_IN_5_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_2_IN_5_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_3_IN_5_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_4_IN_5_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_5_IN_5_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_6_IN_5_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_7_IN_5_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+
+#define LV1_0_IN_6_OUT_ARG_DECL                    LV1_6_OUT_ARG_DECL
+#define LV1_1_IN_6_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_2_IN_6_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_3_IN_6_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_4_IN_6_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_5_IN_6_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_6_IN_6_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_7_IN_6_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+
+#define LV1_0_IN_7_OUT_ARG_DECL                    LV1_7_OUT_ARG_DECL
+#define LV1_1_IN_7_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_2_IN_7_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_3_IN_7_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_4_IN_7_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_5_IN_7_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_6_IN_7_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_7_IN_7_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+
+#define LV1_1_IN_ARGS in_1
+#define LV1_2_IN_ARGS LV1_1_IN_ARGS, in_2
+#define LV1_3_IN_ARGS LV1_2_IN_ARGS, in_3
+#define LV1_4_IN_ARGS LV1_3_IN_ARGS, in_4
+#define LV1_5_IN_ARGS LV1_4_IN_ARGS, in_5
+#define LV1_6_IN_ARGS LV1_5_IN_ARGS, in_6
+#define LV1_7_IN_ARGS LV1_6_IN_ARGS, in_7
+#define LV1_8_IN_ARGS LV1_7_IN_ARGS, in_8
+
+#define LV1_1_OUT_ARGS out_1
+#define LV1_2_OUT_ARGS LV1_1_OUT_ARGS, out_2
+#define LV1_3_OUT_ARGS LV1_2_OUT_ARGS, out_3
+#define LV1_4_OUT_ARGS LV1_3_OUT_ARGS, out_4
+#define LV1_5_OUT_ARGS LV1_4_OUT_ARGS, out_5
+#define LV1_6_OUT_ARGS LV1_5_OUT_ARGS, out_6
+#define LV1_7_OUT_ARGS LV1_6_OUT_ARGS, out_7
+
+#define LV1_0_IN_0_OUT_ARGS
+#define LV1_1_IN_0_OUT_ARGS LV1_1_IN_ARGS
+#define LV1_2_IN_0_OUT_ARGS LV1_2_IN_ARGS
+#define LV1_3_IN_0_OUT_ARGS LV1_3_IN_ARGS
+#define LV1_4_IN_0_OUT_ARGS LV1_4_IN_ARGS
+#define LV1_5_IN_0_OUT_ARGS LV1_5_IN_ARGS
+#define LV1_6_IN_0_OUT_ARGS LV1_6_IN_ARGS
+#define LV1_7_IN_0_OUT_ARGS LV1_7_IN_ARGS
+
+#define LV1_0_IN_1_OUT_ARGS                LV1_1_OUT_ARGS
+#define LV1_1_IN_1_OUT_ARGS LV1_1_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_2_IN_1_OUT_ARGS LV1_2_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_3_IN_1_OUT_ARGS LV1_3_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_4_IN_1_OUT_ARGS LV1_4_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_5_IN_1_OUT_ARGS LV1_5_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_6_IN_1_OUT_ARGS LV1_6_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_7_IN_1_OUT_ARGS LV1_7_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_8_IN_1_OUT_ARGS LV1_8_IN_ARGS, LV1_1_OUT_ARGS
+
+#define LV1_0_IN_2_OUT_ARGS                LV1_2_OUT_ARGS
+#define LV1_1_IN_2_OUT_ARGS LV1_1_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_2_IN_2_OUT_ARGS LV1_2_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_3_IN_2_OUT_ARGS LV1_3_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_4_IN_2_OUT_ARGS LV1_4_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_5_IN_2_OUT_ARGS LV1_5_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_6_IN_2_OUT_ARGS LV1_6_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_7_IN_2_OUT_ARGS LV1_7_IN_ARGS, LV1_2_OUT_ARGS
+
+#define LV1_0_IN_3_OUT_ARGS                LV1_3_OUT_ARGS
+#define LV1_1_IN_3_OUT_ARGS LV1_1_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_2_IN_3_OUT_ARGS LV1_2_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_3_IN_3_OUT_ARGS LV1_3_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_4_IN_3_OUT_ARGS LV1_4_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_5_IN_3_OUT_ARGS LV1_5_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_6_IN_3_OUT_ARGS LV1_6_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_7_IN_3_OUT_ARGS LV1_7_IN_ARGS, LV1_3_OUT_ARGS
+
+#define LV1_0_IN_4_OUT_ARGS                LV1_4_OUT_ARGS
+#define LV1_1_IN_4_OUT_ARGS LV1_1_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_2_IN_4_OUT_ARGS LV1_2_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_3_IN_4_OUT_ARGS LV1_3_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_4_IN_4_OUT_ARGS LV1_4_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_5_IN_4_OUT_ARGS LV1_5_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_6_IN_4_OUT_ARGS LV1_6_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_7_IN_4_OUT_ARGS LV1_7_IN_ARGS, LV1_4_OUT_ARGS
+
+#define LV1_0_IN_5_OUT_ARGS                LV1_5_OUT_ARGS
+#define LV1_1_IN_5_OUT_ARGS LV1_1_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_2_IN_5_OUT_ARGS LV1_2_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_3_IN_5_OUT_ARGS LV1_3_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_4_IN_5_OUT_ARGS LV1_4_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_5_IN_5_OUT_ARGS LV1_5_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_6_IN_5_OUT_ARGS LV1_6_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_7_IN_5_OUT_ARGS LV1_7_IN_ARGS, LV1_5_OUT_ARGS
+
+#define LV1_0_IN_6_OUT_ARGS                LV1_6_OUT_ARGS
+#define LV1_1_IN_6_OUT_ARGS LV1_1_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_2_IN_6_OUT_ARGS LV1_2_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_3_IN_6_OUT_ARGS LV1_3_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_4_IN_6_OUT_ARGS LV1_4_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_5_IN_6_OUT_ARGS LV1_5_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_6_IN_6_OUT_ARGS LV1_6_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_7_IN_6_OUT_ARGS LV1_7_IN_ARGS, LV1_6_OUT_ARGS
+
+#define LV1_0_IN_7_OUT_ARGS                LV1_7_OUT_ARGS
+#define LV1_1_IN_7_OUT_ARGS LV1_1_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_2_IN_7_OUT_ARGS LV1_2_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_3_IN_7_OUT_ARGS LV1_3_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_4_IN_7_OUT_ARGS LV1_4_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_5_IN_7_OUT_ARGS LV1_5_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_6_IN_7_OUT_ARGS LV1_6_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_7_IN_7_OUT_ARGS LV1_7_IN_ARGS, LV1_7_OUT_ARGS
+
+/*
+ * This LV1_CALL() macro is for use by callers.  It expands into an
+ * inline call wrapper and an underscored HV call declaration.  The
+ * wrapper can be used to instrument the lv1 call interface.  The
+ * file lv1call.S defines its own LV1_CALL() macro to expand into
+ * the actual underscored call definition.
+ */
+
+#if !defined(LV1_CALL)
+#define LV1_CALL(name, in, out, num)                               \
+  extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL);      \
+  static inline int lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL) \
+    {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);}
+#endif
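
For illustration, LV1_CALL(get_logical_ppe_id, 0, 1, 69) from the table
below expands (through LV1_0_IN_1_OUT_ARG_DECL and LV1_0_IN_1_OUT_ARGS)
to roughly:

    extern s64 _lv1_get_logical_ppe_id(u64 *out_1);
    static inline int lv1_get_logical_ppe_id(u64 *out_1)
        {return _lv1_get_logical_ppe_id(out_1);}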
+
+#endif /* !defined(__ASSEMBLY__) */
+
+/* lv1 call table */
+
+LV1_CALL(allocate_memory,                               4, 2,   0 )
+LV1_CALL(write_htab_entry,                              4, 0,   1 )
+LV1_CALL(construct_virtual_address_space,               3, 2,   2 )
+LV1_CALL(invalidate_htab_entries,                       5, 0,   3 )
+LV1_CALL(get_virtual_address_space_id_of_ppe,           0, 1,   4 )
+LV1_CALL(query_logical_partition_address_region_info,   1, 5,   6 )
+LV1_CALL(select_virtual_address_space,                  1, 0,   7 )
+LV1_CALL(pause,                                         1, 0,   9 )
+LV1_CALL(destruct_virtual_address_space,                1, 0,  10 )
+LV1_CALL(configure_irq_state_bitmap,                    3, 0,  11 )
+LV1_CALL(connect_irq_plug_ext,                          5, 0,  12 )
+LV1_CALL(release_memory,                                1, 0,  13 )
+LV1_CALL(put_iopte,                                     5, 0,  15 )
+LV1_CALL(disconnect_irq_plug_ext,                       3, 0,  17 )
+LV1_CALL(construct_event_receive_port,                  0, 1,  18 )
+LV1_CALL(destruct_event_receive_port,                   1, 0,  19 )
+LV1_CALL(send_event_locally,                            1, 0,  24 )
+LV1_CALL(end_of_interrupt,                              1, 0,  27 )
+LV1_CALL(connect_irq_plug,                              2, 0,  28 )
+LV1_CALL(disconnect_irq_plug,                           1, 0,  29 )
+LV1_CALL(end_of_interrupt_ext,                          3, 0,  30 )
+LV1_CALL(did_update_interrupt_mask,                     2, 0,  31 )
+LV1_CALL(shutdown_logical_partition,                    1, 0,  44 )
+LV1_CALL(destruct_logical_spe,                          1, 0,  54 )
+LV1_CALL(construct_logical_spe,                         7, 6,  57 )
+LV1_CALL(set_spe_interrupt_mask,                        3, 0,  61 )
+LV1_CALL(set_spe_transition_notifier,                   3, 0,  64 )
+LV1_CALL(disable_logical_spe,                           2, 0,  65 )
+LV1_CALL(clear_spe_interrupt_status,                    4, 0,  66 )
+LV1_CALL(get_spe_interrupt_status,                      2, 1,  67 )
+LV1_CALL(get_logical_ppe_id,                            0, 1,  69 )
+LV1_CALL(set_interrupt_mask,                            5, 0,  73 )
+LV1_CALL(get_logical_partition_id,                      0, 1,  74 )
+LV1_CALL(configure_execution_time_variable,             1, 0,  77 )
+LV1_CALL(get_spe_irq_outlet,                            2, 1,  78 )
+LV1_CALL(set_spe_privilege_state_area_1_register,       3, 0,  79 )
+LV1_CALL(create_repository_node,                        6, 0,  90 )
+LV1_CALL(read_repository_node,                          5, 2,  91 )
+LV1_CALL(write_repository_node,                         6, 0,  92 )
+LV1_CALL(delete_repository_node,                        4, 0,  93 )
+LV1_CALL(read_htab_entries,                             2, 5,  95 )
+LV1_CALL(set_dabr,                                      2, 0,  96 )
+LV1_CALL(get_total_execution_time,                      2, 1, 103 )
+LV1_CALL(allocate_io_segment,                           3, 1, 116 )
+LV1_CALL(release_io_segment,                            2, 0, 117 )
+LV1_CALL(construct_io_irq_outlet,                       1, 1, 120 )
+LV1_CALL(destruct_io_irq_outlet,                        1, 0, 121 )
+LV1_CALL(map_htab,                                      1, 1, 122 )
+LV1_CALL(unmap_htab,                                    1, 0, 123 )
+LV1_CALL(get_version_info,                              0, 2, 127 )
+LV1_CALL(insert_htab_entry,                             6, 3, 158 )
+LV1_CALL(read_virtual_uart,                             3, 1, 162 )
+LV1_CALL(write_virtual_uart,                            3, 1, 163 )
+LV1_CALL(set_virtual_uart_param,                        3, 0, 164 )
+LV1_CALL(get_virtual_uart_param,                        2, 1, 165 )
+LV1_CALL(configure_virtual_uart_irq,                    1, 1, 166 )
+LV1_CALL(open_device,                                   3, 0, 170 )
+LV1_CALL(close_device,                                  2, 0, 171 )
+LV1_CALL(map_device_mmio_region,                        5, 1, 172 )
+LV1_CALL(unmap_device_mmio_region,                      3, 0, 173 )
+LV1_CALL(allocate_device_dma_region,                    5, 1, 174 )
+LV1_CALL(free_device_dma_region,                        3, 0, 175 )
+LV1_CALL(map_device_dma_region,                         6, 0, 176 )
+LV1_CALL(unmap_device_dma_region,                       4, 0, 177 )
+LV1_CALL(net_add_multicast_address,                     4, 0, 185 )
+LV1_CALL(net_remove_multicast_address,                  4, 0, 186 )
+LV1_CALL(net_start_tx_dma,                              4, 0, 187 )
+LV1_CALL(net_stop_tx_dma,                               2, 0, 188 )
+LV1_CALL(net_start_rx_dma,                              4, 0, 189 )
+LV1_CALL(net_stop_rx_dma,                               2, 0, 190 )
+LV1_CALL(net_set_interrupt_status_indicator,            4, 0, 191 )
+LV1_CALL(net_set_interrupt_mask,                        4, 0, 193 )
+LV1_CALL(net_control,                                   6, 2, 194 )
+LV1_CALL(connect_interrupt_event_receive_port,          4, 0, 197 )
+LV1_CALL(disconnect_interrupt_event_receive_port,       4, 0, 198 )
+LV1_CALL(get_spe_all_interrupt_statuses,                1, 1, 199 )
+LV1_CALL(deconfigure_virtual_uart_irq,                  0, 0, 202 )
+LV1_CALL(enable_logical_spe,                            2, 0, 207 )
+LV1_CALL(gpu_open,                                      1, 0, 210 )
+LV1_CALL(gpu_close,                                     0, 0, 211 )
+LV1_CALL(gpu_device_map,                                1, 2, 212 )
+LV1_CALL(gpu_device_unmap,                              1, 0, 213 )
+LV1_CALL(gpu_memory_allocate,                           5, 2, 214 )
+LV1_CALL(gpu_memory_free,                               1, 0, 216 )
+LV1_CALL(gpu_context_allocate,                          2, 5, 217 )
+LV1_CALL(gpu_context_free,                              1, 0, 218 )
+LV1_CALL(gpu_context_iomap,                             5, 0, 221 )
+LV1_CALL(gpu_context_attribute,                         6, 0, 225 )
+LV1_CALL(gpu_context_intr,                              1, 1, 227 )
+LV1_CALL(gpu_attribute,                                 3, 0, 228 )
+LV1_CALL(get_rtc,                                       0, 2, 232 )
+LV1_CALL(set_ppe_periodic_tracer_frequency,             1, 0, 240 )
+LV1_CALL(start_ppe_periodic_tracer,                     5, 0, 241 )
+LV1_CALL(stop_ppe_periodic_tracer,                      1, 1, 242 )
+LV1_CALL(storage_read,                                  6, 1, 245 )
+LV1_CALL(storage_write,                                 6, 1, 246 )
+LV1_CALL(storage_send_device_command,                   6, 1, 248 )
+LV1_CALL(storage_get_async_status,                      1, 2, 249 )
+LV1_CALL(storage_check_async_status,                    2, 1, 254 )
+LV1_CALL(panic,                                         1, 0, 255 )
+LV1_CALL(construct_lpm,                                 6, 3, 140 )
+LV1_CALL(destruct_lpm,                                  1, 0, 141 )
+LV1_CALL(start_lpm,                                     1, 0, 142 )
+LV1_CALL(stop_lpm,                                      1, 1, 143 )
+LV1_CALL(copy_lpm_trace_buffer,                         3, 1, 144 )
+LV1_CALL(add_lpm_event_bookmark,                        5, 0, 145 )
+LV1_CALL(delete_lpm_event_bookmark,                     3, 0, 146 )
+LV1_CALL(set_lpm_interrupt_mask,                        3, 1, 147 )
+LV1_CALL(get_lpm_interrupt_status,                      1, 1, 148 )
+LV1_CALL(set_lpm_general_control,                       5, 2, 149 )
+LV1_CALL(set_lpm_interval,                              3, 1, 150 )
+LV1_CALL(set_lpm_trigger_control,                       3, 1, 151 )
+LV1_CALL(set_lpm_counter_control,                       4, 1, 152 )
+LV1_CALL(set_lpm_group_control,                         3, 1, 153 )
+LV1_CALL(set_lpm_debug_bus_control,                     3, 1, 154 )
+LV1_CALL(set_lpm_counter,                               5, 2, 155 )
+LV1_CALL(set_lpm_signal,                                7, 0, 156 )
+LV1_CALL(set_lpm_spr_trigger,                           2, 0, 157 )
+
+#endif
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
new file mode 100644
index 0000000..a47de82
--- /dev/null
+++ b/arch/powerpc/include/asm/machdep.h
@@ -0,0 +1,311 @@
+#ifndef _ASM_POWERPC_MACHDEP_H
+#define _ASM_POWERPC_MACHDEP_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+
+#include <asm/setup.h>
+
+/* We export this macro so that external modules such as ALSA can tell
+ * whether ppc_md.feature_call is implemented or not.
+ */
+#define CONFIG_PPC_HAS_FEATURE_CALLS
+
+struct pt_regs;
+struct pci_bus;	
+struct device_node;
+struct iommu_table;
+struct rtc_time;
+struct file;
+struct pci_controller;
+struct kimage;
+struct pci_host_bridge;
+
+struct machdep_calls {
+	char		*name;
+#ifdef CONFIG_PPC64
+	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
+				   unsigned long flags, void *caller);
+	void		(*iounmap)(volatile void __iomem *token);
+
+#ifdef CONFIG_PM
+	void		(*iommu_save)(void);
+	void		(*iommu_restore)(void);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+	unsigned long	(*memory_block_size)(void);
+#endif
+#endif /* CONFIG_PPC64 */
+
+	/* Platform set_dma_mask and dma_get_required_mask overrides */
+	int		(*dma_set_mask)(struct device *dev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct device *dev);
+
+	int		(*probe)(void);
+	void		(*setup_arch)(void); /* Optional, may be NULL */
+	/* Optional, may be NULL. */
+	void		(*show_cpuinfo)(struct seq_file *m);
+	void		(*show_percpuinfo)(struct seq_file *m, int i);
+	/* Returns the current operating frequency of "cpu" in Hz */
+	unsigned long  	(*get_proc_freq)(unsigned int cpu);
+
+	void		(*init_IRQ)(void);
+
+	/* Return an irq, or 0 to indicate there are none pending. */
+	unsigned int	(*get_irq)(void);
+
+	/* PCI stuff */
+	/* Called after allocating resources */
+	void		(*pcibios_fixup)(void);
+	void		(*pci_irq_fixup)(struct pci_dev *dev);
+	int		(*pcibios_root_bridge_prepare)(struct pci_host_bridge
+				*bridge);
+
+	/* To set up PHBs when using the automatic OF platform driver for PCI */
+	int		(*pci_setup_phb)(struct pci_controller *host);
+
+	void __noreturn	(*restart)(char *cmd);
+	void __noreturn (*halt)(void);
+	void		(*panic)(char *str);
+	void		(*cpu_die)(void);
+
+	long		(*time_init)(void); /* Optional, may be NULL */
+
+	int		(*set_rtc_time)(struct rtc_time *);
+	void		(*get_rtc_time)(struct rtc_time *);
+	time64_t	(*get_boot_time)(void);
+	unsigned char 	(*rtc_read_val)(int addr);
+	void		(*rtc_write_val)(int addr, unsigned char val);
+
+	void		(*calibrate_decr)(void);
+
+	void		(*progress)(char *, unsigned short);
+
+	/* Interface for platform error logging */
+	void 		(*log_error)(char *buf, unsigned int err_type, int fatal);
+
+	unsigned char 	(*nvram_read_val)(int addr);
+	void		(*nvram_write_val)(int addr, unsigned char val);
+	ssize_t		(*nvram_write)(char *buf, size_t count, loff_t *index);
+	ssize_t		(*nvram_read)(char *buf, size_t count, loff_t *index);	
+	ssize_t		(*nvram_size)(void);		
+	void		(*nvram_sync)(void);
+
+	/* Exception handlers */
+	int		(*system_reset_exception)(struct pt_regs *regs);
+	int 		(*machine_check_exception)(struct pt_regs *regs);
+	int		(*handle_hmi_exception)(struct pt_regs *regs);
+
+	/* Early exception handlers called in realmode */
+	int		(*hmi_exception_early)(struct pt_regs *regs);
+
+	/* Called during machine check exception to retrieve the fixup address. */
+	bool		(*mce_check_early_recovery)(struct pt_regs *regs);
+
+	/* Motherboard/chipset features. This is a kind of general purpose
+	 * hook used to control some machine specific features (like reset
+	 * lines, chip power control, etc...).
+	 */
+	long	 	(*feature_call)(unsigned int feature, ...);
+
+	/* Get legacy PCI/IDE interrupt mapping */ 
+	int		(*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
+	
+	/* Get access protection for /dev/mem */
+	pgprot_t	(*phys_mem_access_prot)(struct file *file,
+						unsigned long pfn,
+						unsigned long size,
+						pgprot_t vma_prot);
+
+	/*
+	 * Function for waiting for work with reduced power in idle loop;
+	 * called with interrupts disabled.
+	 */
+	void		(*power_save)(void);
+
+	/* Function to enable performance monitor counters for this
+	   platform, called once per cpu. */
+	void		(*enable_pmcs)(void);
+
+	/* Set DABR for this platform, leave empty for default implementation */
+	int		(*set_dabr)(unsigned long dabr,
+				    unsigned long dabrx);
+
+	/* Set DAWR for this platform, leave empty for default implementation */
+	int		(*set_dawr)(unsigned long dawr,
+				    unsigned long dawrx);
+
+#ifdef CONFIG_PPC32	/* XXX for now */
+	/* A general init function, called by ppc_init in init/main.c.
+	   May be NULL. */
+	void		(*init)(void);
+
+	void		(*kgdb_map_scc)(void);
+
+	/*
+	 * optional PCI "hooks"
+	 */
+	/* Called at the very end of pcibios_init() */
+	void (*pcibios_after_init)(void);
+
+#endif /* CONFIG_PPC32 */
+
+	/* Called in indirect_* to avoid touching devices */
+	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+
+	/* Called after PPC generic resource fixup to perform
+	   machine specific fixups */
+	void (*pcibios_fixup_resources)(struct pci_dev *);
+
+	/* Called for each PCI bus in the system when it's probed */
+	void (*pcibios_fixup_bus)(struct pci_bus *);
+
+	/* Called after scan and before resource survey */
+	void (*pcibios_fixup_phb)(struct pci_controller *hose);
+
+	/*
+	 * Called after device has been added to bus and
+	 * before sysfs has been created.
+	 */
+	void (*pcibios_bus_add_device)(struct pci_dev *pdev);
+
+	resource_size_t (*pcibios_default_alignment)(void);
+
+#ifdef CONFIG_PCI_IOV
+	void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
+	resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
+	int (*pcibios_sriov_enable)(struct pci_dev *pdev, u16 num_vfs);
+	int (*pcibios_sriov_disable)(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
+
+	/* Called to shutdown machine specific hardware not already controlled
+	 * by other drivers.
+	 */
+	void (*machine_shutdown)(void);
+
+#ifdef CONFIG_KEXEC_CORE
+	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
+
+	/* Called to do whatever setup is needed on the image and the
+	 * reboot code buffer. Returns 0 on success.
+	 * Provide your own (maybe dummy) implementation if your platform
+	 * claims to support kexec.
+	 */
+	int (*machine_kexec_prepare)(struct kimage *image);
+
+	/* Called to perform the _real_ kexec.
+	 * Do NOT allocate memory or fail here. We are past the point of
+	 * no return.
+	 */
+	void (*machine_kexec)(struct kimage *image);
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_SUSPEND
+	/* These are called to disable and enable, respectively, IRQs when
+	 * entering a suspend state.  If NULL, then the generic versions
+	 * will be called.  The generic versions disable/enable the
+	 * decrementer along with interrupts.
+	 */
+	void (*suspend_disable_irqs)(void);
+	void (*suspend_enable_irqs)(void);
+#endif
+	int (*suspend_disable_cpu)(void);
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+	ssize_t (*cpu_probe)(const char *, size_t);
+	ssize_t (*cpu_release)(const char *, size_t);
+#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+	int (*get_random_seed)(unsigned long *v);
+#endif
+};
+
+extern void e500_idle(void);
+extern void power4_idle(void);
+extern void power7_idle(void);
+extern void power9_idle(void);
+extern void ppc6xx_idle(void);
+extern void book3e_idle(void);
+
+/*
+ * ppc_md contains a copy of the machine description structure for the
+ * current platform. machine_id contains the initial address where the
+ * description was found during boot.
+ */
+extern struct machdep_calls ppc_md;
+extern struct machdep_calls *machine_id;
+
+#define __machine_desc __attribute__ ((__section__ (".machine.desc")))
+
+#define define_machine(name)					\
+	extern struct machdep_calls mach_##name;		\
+	EXPORT_SYMBOL(mach_##name);				\
+	struct machdep_calls mach_##name __machine_desc =
+
+#define machine_is(name) \
+	({ \
+		extern struct machdep_calls mach_##name \
+			__attribute__((weak));		 \
+		machine_id == &mach_##name; \
+	})
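
A usage sketch (all names hypothetical): a platform defines its
machdep_calls once with define_machine(), and other code can then test
for it with machine_is():

    define_machine(example) {
            .name  = "Example Platform",
            .probe = example_probe,         /* hypothetical */
            /* ... other machdep_calls members ... */
    };

    /* elsewhere: */
    if (machine_is(example))
            example_fixup();                /* hypothetical */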
+
+extern void probe_machine(void);
+
+#ifdef CONFIG_PPC_PMAC
+/*
+ * Power Macintoshes have either a CUDA, PMU or SMU controlling
+ * system reset, power, NVRAM, RTC.
+ */
+typedef enum sys_ctrler_kind {
+	SYS_CTRLER_UNKNOWN = 0,
+	SYS_CTRLER_CUDA = 1,
+	SYS_CTRLER_PMU = 2,
+	SYS_CTRLER_SMU = 3,
+} sys_ctrler_t;
+extern sys_ctrler_t sys_ctrler;
+
+#endif /* CONFIG_PPC_PMAC */
+
+static inline void log_error(char *buf, unsigned int err_type, int fatal)
+{
+	if (ppc_md.log_error)
+		ppc_md.log_error(buf, err_type, fatal);
+}
+
+#define __define_machine_initcall(mach, fn, id) \
+	static int __init __machine_initcall_##mach##_##fn(void) { \
+		if (machine_is(mach)) return fn(); \
+		return 0; \
+	} \
+	__define_initcall(__machine_initcall_##mach##_##fn, id);
+
+#define machine_early_initcall(mach, fn)	__define_machine_initcall(mach, fn, early)
+#define machine_core_initcall(mach, fn)		__define_machine_initcall(mach, fn, 1)
+#define machine_core_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 1s)
+#define machine_postcore_initcall(mach, fn)	__define_machine_initcall(mach, fn, 2)
+#define machine_postcore_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 2s)
+#define machine_arch_initcall(mach, fn)		__define_machine_initcall(mach, fn, 3)
+#define machine_arch_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 3s)
+#define machine_subsys_initcall(mach, fn)	__define_machine_initcall(mach, fn, 4)
+#define machine_subsys_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 4s)
+#define machine_fs_initcall(mach, fn)		__define_machine_initcall(mach, fn, 5)
+#define machine_fs_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 5s)
+#define machine_rootfs_initcall(mach, fn)	__define_machine_initcall(mach, fn, rootfs)
+#define machine_device_initcall(mach, fn)	__define_machine_initcall(mach, fn, 6)
+#define machine_device_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 6s)
+#define machine_late_initcall(mach, fn)		__define_machine_initcall(mach, fn, 7)
+#define machine_late_initcall_sync(mach, fn)	__define_machine_initcall(mach, fn, 7s)
+
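A sketch of the initcall wrappers (machine and function names
hypothetical): the initcall fires at the usual device_initcall stage,
but only when the kernel actually booted on the named machine:

    static int __init example_setup_devices(void)
    {
            /* runs only when machine_is(example) is true */
            return 0;
    }
    machine_device_initcall(example, example_setup_devices);
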
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MACHDEP_H */
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
new file mode 100644
index 0000000..ff5fd82
--- /dev/null
+++ b/arch/powerpc/include/asm/macio.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MACIO_ASIC_H__
+#define __MACIO_ASIC_H__
+#ifdef __KERNEL__
+
+#include <linux/of_device.h>
+
+extern struct bus_type macio_bus_type;
+
+/* MacIO device driver is defined later */
+struct macio_driver;
+struct macio_chip;
+
+#define MACIO_DEV_COUNT_RESOURCES	8
+#define MACIO_DEV_COUNT_IRQS		8
+
+/*
+ * the macio_bus structure is used to describe a "virtual" bus
+ * within a MacIO ASIC. It's typically provided by a macio_pci_asic
+ * PCI device, but could be provided differently as well (e.g. NuBus
+ * machines using a fake OF tree).
+ *
+ * The pdev field can be NULL on non-PCI machines
+ */
+struct macio_bus
+{
+	struct macio_chip	*chip;		/* macio_chip (private use) */
+	int			index;		/* macio chip index in system */
+#ifdef CONFIG_PCI
+	struct pci_dev		*pdev;		/* PCI device hosting this bus */
+#endif
+};
+
+/*
+ * the macio_dev structure is used to describe a device
+ * within an Apple MacIO ASIC.
+ */
+struct macio_dev
+{
+	struct macio_bus	*bus;		/* macio bus this device is on */
+	struct macio_dev	*media_bay;	/* Device is part of a media bay */
+	struct platform_device	ofdev;
+	struct device_dma_parameters dma_parms; /* ide needs that */
+	int			n_resources;
+	struct resource		resource[MACIO_DEV_COUNT_RESOURCES];
+	int			n_interrupts;
+	struct resource		interrupt[MACIO_DEV_COUNT_IRQS];
+};
+#define	to_macio_device(d) container_of(d, struct macio_dev, ofdev.dev)
+#define	of_to_macio_device(d) container_of(d, struct macio_dev, ofdev)
+
+extern struct macio_dev *macio_dev_get(struct macio_dev *dev);
+extern void macio_dev_put(struct macio_dev *dev);
+
+/*
+ * Accessors to resources & interrupts and other device
+ * fields
+ */
+
+static inline int macio_resource_count(struct macio_dev *dev)
+{
+	return dev->n_resources;
+}
+
+static inline unsigned long macio_resource_start(struct macio_dev *dev, int resource_no)
+{
+	return dev->resource[resource_no].start;
+}
+
+static inline unsigned long macio_resource_end(struct macio_dev *dev, int resource_no)
+{
+	return dev->resource[resource_no].end;
+}
+
+static inline unsigned long macio_resource_len(struct macio_dev *dev, int resource_no)
+{
+	struct resource *res = &dev->resource[resource_no];
+	if (res->start == 0 || res->end == 0 || res->end < res->start)
+		return 0;
+	return resource_size(res);
+}
+
+extern int macio_enable_devres(struct macio_dev *dev);
+
+extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
+extern void macio_release_resource(struct macio_dev *dev, int resource_no);
+extern int macio_request_resources(struct macio_dev *dev, const char *name);
+extern void macio_release_resources(struct macio_dev *dev);
+
+static inline int macio_irq_count(struct macio_dev *dev)
+{
+	return dev->n_interrupts;
+}
+
+static inline int macio_irq(struct macio_dev *dev, int irq_no)
+{
+	return dev->interrupt[irq_no].start;
+}
+
+static inline void macio_set_drvdata(struct macio_dev *dev, void *data)
+{
+	dev_set_drvdata(&dev->ofdev.dev, data);
+}
+
+static inline void* macio_get_drvdata(struct macio_dev *dev)
+{
+	return dev_get_drvdata(&dev->ofdev.dev);
+}
+
+static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
+{
+	return mdev->ofdev.dev.of_node;
+}
+
+#ifdef CONFIG_PCI
+static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
+{
+	return mdev->bus->pdev;
+}
+#endif
+
+/*
+ * A driver for a mac-io chip based device
+ */
+struct macio_driver
+{
+	int	(*probe)(struct macio_dev* dev, const struct of_device_id *match);
+	int	(*remove)(struct macio_dev* dev);
+
+	int	(*suspend)(struct macio_dev* dev, pm_message_t state);
+	int	(*resume)(struct macio_dev* dev);
+	int	(*shutdown)(struct macio_dev* dev);
+
+#ifdef CONFIG_PMAC_MEDIABAY
+	void	(*mediabay_event)(struct macio_dev* dev, int mb_state);
+#endif
+	struct device_driver	driver;
+};
+#define	to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
+
+extern int macio_register_driver(struct macio_driver *);
+extern void macio_unregister_driver(struct macio_driver *);
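
A sketch of a minimal driver built on this interface (all names
hypothetical):

    static int example_probe(struct macio_dev *mdev,
                             const struct of_device_id *match)
    {
            if (macio_request_resources(mdev, "example"))
                    return -EBUSY;
            /* use macio_resource_start(mdev, 0) and macio_irq(mdev, 0) */
            macio_set_drvdata(mdev, NULL);
            return 0;
    }

    static struct macio_driver example_driver = {
            .probe  = example_probe,
            .driver = {
                    .name = "example",
            },
    };

    /* module init/exit would then call macio_register_driver() and
     * macio_unregister_driver() on &example_driver.
     */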
+
+#endif /* __KERNEL__ */
+#endif /* __MACIO_ASIC_H__ */
diff --git a/arch/powerpc/include/asm/mc146818rtc.h b/arch/powerpc/include/asm/mc146818rtc.h
new file mode 100644
index 0000000..f2741c8
--- /dev/null
+++ b/arch/powerpc/include/asm/mc146818rtc.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_POWERPC_MC146818RTC_H
+#define _ASM_POWERPC_MC146818RTC_H
+
+/*
+ * Machine dependent access functions for RTC registers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x)	(0x70 + (x))
+#define RTC_ALWAYS_BCD	1	/* RTC operates in binary mode */
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way the data register is accessed differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
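
A usage sketch, assuming the standard MC146818 register layout in which
index 0 is the seconds register and index 1 the seconds alarm:

    unsigned char sec;

    sec = CMOS_READ(0);     /* latch index 0, then read the data port */
    CMOS_WRITE(sec, 1);     /* latch index 1, then write the data port */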
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_MC146818RTC_H */
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 0000000..3a1226e
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,213 @@
+/*
+ * Machine check exception header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+enum MCE_Version {
+	MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+	MCE_SEV_NO_ERROR = 0,
+	MCE_SEV_WARNING = 1,
+	MCE_SEV_ERROR_SYNC = 2,
+	MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+	MCE_DISPOSITION_RECOVERED = 0,
+	MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+	MCE_INITIATOR_UNKNOWN = 0,
+	MCE_INITIATOR_CPU = 1,
+};
+
+enum MCE_ErrorType {
+	MCE_ERROR_TYPE_UNKNOWN = 0,
+	MCE_ERROR_TYPE_UE = 1,
+	MCE_ERROR_TYPE_SLB = 2,
+	MCE_ERROR_TYPE_ERAT = 3,
+	MCE_ERROR_TYPE_TLB = 4,
+	MCE_ERROR_TYPE_USER = 5,
+	MCE_ERROR_TYPE_RA = 6,
+	MCE_ERROR_TYPE_LINK = 7,
+};
+
+enum MCE_UeErrorType {
+	MCE_UE_ERROR_INDETERMINATE = 0,
+	MCE_UE_ERROR_IFETCH = 1,
+	MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+	MCE_UE_ERROR_LOAD_STORE = 3,
+	MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+	MCE_SLB_ERROR_INDETERMINATE = 0,
+	MCE_SLB_ERROR_PARITY = 1,
+	MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+	MCE_ERAT_ERROR_INDETERMINATE = 0,
+	MCE_ERAT_ERROR_PARITY = 1,
+	MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+	MCE_TLB_ERROR_INDETERMINATE = 0,
+	MCE_TLB_ERROR_PARITY = 1,
+	MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_UserErrorType {
+	MCE_USER_ERROR_INDETERMINATE = 0,
+	MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+	MCE_RA_ERROR_INDETERMINATE = 0,
+	MCE_RA_ERROR_IFETCH = 1,
+	MCE_RA_ERROR_IFETCH_FOREIGN = 2,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 3,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 4,
+	MCE_RA_ERROR_LOAD = 5,
+	MCE_RA_ERROR_STORE = 6,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 7,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 8,
+	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 9,
+};
+
+enum MCE_LinkErrorType {
+	MCE_LINK_ERROR_INDETERMINATE = 0,
+	MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+	MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+	MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
+struct machine_check_event {
+	enum MCE_Version	version:8;	/* 0x00 */
+	uint8_t			in_use;		/* 0x01 */
+	enum MCE_Severity	severity:8;	/* 0x02 */
+	enum MCE_Initiator	initiator:8;	/* 0x03 */
+	enum MCE_ErrorType	error_type:8;	/* 0x04 */
+	enum MCE_Disposition	disposition:8;	/* 0x05 */
+	uint8_t			reserved_1[2];	/* 0x06 */
+	uint64_t		gpr3;		/* 0x08 */
+	uint64_t		srr0;		/* 0x10 */
+	uint64_t		srr1;		/* 0x18 */
+	union {					/* 0x20 */
+		struct {
+			enum MCE_UeErrorType ue_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		physical_address_provided;
+			uint8_t		reserved_1[5];
+			uint64_t	effective_address;
+			uint64_t	physical_address;
+			uint8_t		reserved_2[8];
+		} ue_error;
+
+		struct {
+			enum MCE_SlbErrorType slb_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} slb_error;
+
+		struct {
+			enum MCE_EratErrorType erat_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} erat_error;
+
+		struct {
+			enum MCE_TlbErrorType tlb_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} tlb_error;
+
+		struct {
+			enum MCE_UserErrorType user_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} user_error;
+
+		struct {
+			enum MCE_RaErrorType ra_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} ra_error;
+
+		struct {
+			enum MCE_LinkErrorType link_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} link_error;
+	} u;
+};
+
+struct mce_error_info {
+	enum MCE_ErrorType error_type:8;
+	union {
+		enum MCE_UeErrorType ue_error_type:8;
+		enum MCE_SlbErrorType slb_error_type:8;
+		enum MCE_EratErrorType erat_error_type:8;
+		enum MCE_TlbErrorType tlb_error_type:8;
+		enum MCE_UserErrorType user_error_type:8;
+		enum MCE_RaErrorType ra_error_type:8;
+		enum MCE_LinkErrorType link_error_type:8;
+	} u;
+	enum MCE_Severity	severity:8;
+	enum MCE_Initiator	initiator:8;
+};
+
+#define MAX_MC_EVT	100
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE	true
+#define MCE_EVENT_DONTRELEASE	false
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+			   struct mce_error_info *mce_err, uint64_t nip,
+			   uint64_t addr, uint64_t phys_addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt,
+					   bool user_mode);
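
A consumer-side sketch using the declarations above: fetch the pending
event and release its slot in one call, then print it (assuming a
kernel-mode context):

    struct machine_check_event evt;

    if (get_mce_event(&evt, MCE_EVENT_RELEASE))
            machine_check_print_event_info(&evt, false);
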
+#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
new file mode 100644
index 0000000..230fda4
--- /dev/null
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mediabay.h: definitions for using the media bay
+ * on PowerBook 3400 and similar computers.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+#ifndef _PPC_MEDIABAY_H
+#define _PPC_MEDIABAY_H
+
+#ifdef __KERNEL__
+
+#define MB_FD		0	/* media bay contains floppy drive (automatic eject ?) */
+#define MB_FD1		1	/* media bay contains floppy drive (manual eject ?) */
+#define MB_SOUND	2	/* sound device ? */
+#define MB_CD		3	/* media bay contains ATA drive such as CD or ZIP */
+#define MB_PCI		5	/* media bay contains a PCI device */
+#define MB_POWER	6	/* media bay contains a Power device (???) */
+#define MB_NO		7	/* media bay contains nothing */
+
+struct macio_dev;
+
+#ifdef CONFIG_PMAC_MEDIABAY
+
+/* Check the content type of the bay; returns MB_NO if the bay is still
+ * transitioning.
+ */
+extern int check_media_bay(struct macio_dev *bay);
+
+/* The ATA driver uses the calls below to temporarily hold off the
+ * media bay callbacks while initializing the interface.
+ */
+extern void lock_media_bay(struct macio_dev *bay);
+extern void unlock_media_bay(struct macio_dev *bay);
+
+#else
+
+static inline int check_media_bay(struct macio_dev *bay)
+{
+	return MB_NO;
+}
+
+static inline void lock_media_bay(struct macio_dev *bay) { }
+static inline void unlock_media_bay(struct macio_dev *bay) { }
+
+#endif
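
A sketch of the pattern the comments above describe (bay is a
struct macio_dev pointer): hold off bay callbacks while bringing the
interface up, and only proceed once the bay reports an ATA device:

    lock_media_bay(bay);
    if (check_media_bay(bay) == MB_CD) {
            /* safe to initialize the ATA interface here */
    }
    unlock_media_bay(bay);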
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_MEDIABAY_H */
diff --git a/arch/powerpc/include/asm/membarrier.h b/arch/powerpc/include/asm/membarrier.h
new file mode 100644
index 0000000..6e20bb5
--- /dev/null
+++ b/arch/powerpc/include/asm/membarrier.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_POWERPC_MEMBARRIER_H
+#define _ASM_POWERPC_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+					     struct mm_struct *next,
+					     struct task_struct *tsk)
+{
+	/*
+	 * Only need the full barrier when switching between processes.
+	 * Barrier when switching from kernel to userspace is not
+	 * required here, given that it is implied by mmdrop(). Barrier
+	 * when switching from userspace to kernel is not needed after
+	 * store to rq->curr.
+	 */
+	if (likely(!(atomic_read(&next->membarrier_state) &
+		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+		return;
+
+	/*
+	 * The membarrier system call requires a full memory barrier
+	 * after storing to rq->curr, before going back to user-space.
+	 */
+	smp_mb();
+}
+
+#endif /* _ASM_POWERPC_MEMBARRIER_H */
diff --git a/arch/powerpc/include/asm/mm-arch-hooks.h b/arch/powerpc/include/asm/mm-arch-hooks.h
new file mode 100644
index 0000000..f2a2da8
--- /dev/null
+++ b/arch/powerpc/include/asm/mm-arch-hooks.h
@@ -0,0 +1,28 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
+#define _ASM_POWERPC_MM_ARCH_HOOKS_H
+
+static inline void arch_remap(struct mm_struct *mm,
+			      unsigned long old_start, unsigned long old_end,
+			      unsigned long new_start, unsigned long new_end)
+{
+	/*
+	 * mremap() doesn't allow moving multiple vmas so we can limit the
+	 * check to old_start == vdso_base.
+	 */
+	if (old_start == mm->context.vdso_base)
+		mm->context.vdso_base = new_start;
+}
+#define arch_remap arch_remap
+
+#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
new file mode 100644
index 0000000..e3f1b5b
--- /dev/null
+++ b/arch/powerpc/include/asm/mman.h
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_MMAN_H
+#define _ASM_POWERPC_MMAN_H
+
+#include <uapi/asm/mman.h>
+
+#ifdef CONFIG_PPC64
+
+#include <asm/cputable.h>
+#include <linux/mm.h>
+#include <linux/pkeys.h>
+#include <asm/cpu_has_feature.h>
+
+/*
+ * This file is included by linux/mman.h, so we can't use calc_vm_prot_bits()
+ * here.  How important is the optimization?
+ */
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+		unsigned long pkey)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+	return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
+}
+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+
+static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (vm_flags & VM_SAO) ?
+		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
+}
+#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+		return false;
+	if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
+		return false;
+	return true;
+}
+#define arch_validate_prot arch_validate_prot
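
Seen from userspace this means PROT_SAO is only accepted on CPUs with
CPU_FTR_SAO; a sketch, assuming <sys/mman.h> exposes the powerpc
PROT_SAO bit:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* arch_validate_prot() above rejects this without
             * CPU_FTR_SAO:
             */
            if (mprotect(p, 4096, PROT_READ | PROT_WRITE | PROT_SAO))
                    perror("PROT_SAO rejected");
            return 0;
    }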
+
+#endif /* CONFIG_PPC64 */
+#endif	/* _ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
new file mode 100644
index 0000000..74f4edb
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_40X_H_
+#define _ASM_POWERPC_MMU_40X_H_
+
+/*
+ * PPC40x support
+ */
+
+#define PPC40X_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion.  On all architectures, the data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the 4xx-specific tlbre, tlbwe, and tlbsx
+ * instructions.
+ */
+
+#define	TLB_LO          1
+#define	TLB_HI          0
+
+#define	TLB_DATA        TLB_LO
+#define	TLB_TAG         TLB_HI
+
+/* Tag portion */
+
+#define TLB_EPN_MASK    0xFFFFFC00      /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x)   (((x) & 0x7) << 7)
+#define   PAGESZ_1K		0
+#define   PAGESZ_4K             1
+#define   PAGESZ_16K            2
+#define   PAGESZ_64K            3
+#define   PAGESZ_256K           4
+#define   PAGESZ_1M             5
+#define   PAGESZ_4M             6
+#define   PAGESZ_16M            7
+#define TLB_VALID       0x00000040      /* Entry is valid */
+
+/* Data portion */
+
+#define TLB_RPN_MASK    0xFFFFFC00      /* Real Page Number */
+#define TLB_PERM_MASK   0x00000300
+#define TLB_EX          0x00000200      /* Instruction execution allowed */
+#define TLB_WR          0x00000100      /* Writes permitted */
+#define TLB_ZSEL_MASK   0x000000F0
+#define TLB_ZSEL(x)     (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK   0x0000000F
+#define TLB_W           0x00000008      /* Caching is write-through */
+#define TLB_I           0x00000004      /* Caching is inhibited */
+#define TLB_M           0x00000002      /* Memory is coherent */
+#define TLB_G           0x00000001      /* Memory is guarded from prefetch */
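
A sketch of how the masks compose into the two words (addresses and
attribute choices illustrative):

    unsigned long ea = 0xc0000000, pa = 0x00000000;

    /* tag word: effective page, 4K size, valid */
    unsigned long tag  = (ea & TLB_EPN_MASK) |
                         TLB_PAGESZ(PAGESZ_4K) | TLB_VALID;
    /* data word: real page, executable, writable, coherent */
    unsigned long data = (pa & TLB_RPN_MASK) | TLB_EX | TLB_WR | TLB_M;

    /* a tlbwe sequence would then store tag in the TLB_HI (TLB_TAG)
     * word and data in the TLB_LO (TLB_DATA) word of the chosen entry
     */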
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
+#endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
new file mode 100644
index 0000000..295b3db
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_44X_H_
+#define _ASM_POWERPC_MMU_44X_H_
+/*
+ * PPC440 support
+ */
+
+#include <asm/asm-const.h>
+
+#define PPC44x_MMUCR_TID	0x000000ff
+#define PPC44x_MMUCR_STS	0x00010000
+
+#define	PPC44x_TLB_PAGEID	0
+#define	PPC44x_TLB_XLAT		1
+#define	PPC44x_TLB_ATTRIB	2
+
+/* Page identification fields */
+#define PPC44x_TLB_EPN_MASK	0xfffffc00      /* Effective Page Number */
+#define	PPC44x_TLB_VALID	0x00000200      /* Valid flag */
+#define PPC44x_TLB_TS		0x00000100	/* Translation address space */
+#define PPC44x_TLB_1K		0x00000000	/* Page sizes */
+#define PPC44x_TLB_4K		0x00000010
+#define PPC44x_TLB_16K		0x00000020
+#define PPC44x_TLB_64K		0x00000030
+#define PPC44x_TLB_256K		0x00000040
+#define PPC44x_TLB_1M		0x00000050
+#define PPC44x_TLB_16M		0x00000070
+#define	PPC44x_TLB_256M		0x00000090
+
+/* Translation fields */
+#define PPC44x_TLB_RPN_MASK	0xfffffc00      /* Real Page Number */
+#define	PPC44x_TLB_ERPN_MASK	0x0000000f
+
+/* Storage attribute and access control fields */
+#define PPC44x_TLB_ATTR_MASK	0x0000ff80
+#define PPC44x_TLB_U0		0x00008000      /* User 0 */
+#define PPC44x_TLB_U1		0x00004000      /* User 1 */
+#define PPC44x_TLB_U2		0x00002000      /* User 2 */
+#define PPC44x_TLB_U3		0x00001000      /* User 3 */
+#define PPC44x_TLB_W		0x00000800      /* Caching is write-through */
+#define PPC44x_TLB_I		0x00000400      /* Caching is inhibited */
+#define PPC44x_TLB_M		0x00000200      /* Memory is coherent */
+#define PPC44x_TLB_G		0x00000100      /* Memory is guarded */
+#define PPC44x_TLB_E		0x00000080      /* Memory is little endian */
+
+#define PPC44x_TLB_PERM_MASK	0x0000003f
+#define PPC44x_TLB_UX		0x00000020      /* User execution */
+#define PPC44x_TLB_UW		0x00000010      /* User write */
+#define PPC44x_TLB_UR		0x00000008      /* User read */
+#define PPC44x_TLB_SX		0x00000004      /* Super execution */
+#define PPC44x_TLB_SW		0x00000002      /* Super write */
+#define PPC44x_TLB_SR		0x00000001      /* Super read */
+
+/* Number of TLB entries */
+#define PPC44x_TLB_SIZE		64
+
+/* 47x bits */
+#define PPC47x_MMUCR_TID	0x0000ffff
+#define PPC47x_MMUCR_STS	0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK	0xfffff000      /* Effective Page Number */
+#define PPC47x_TLB0_VALID	0x00000800      /* Valid flag */
+#define PPC47x_TLB0_TS		0x00000400	/* Translation address space */
+#define PPC47x_TLB0_4K		0x00000000
+#define PPC47x_TLB0_16K		0x00000010
+#define PPC47x_TLB0_64K		0x00000030
+#define PPC47x_TLB0_1M		0x00000070
+#define PPC47x_TLB0_16M		0x000000f0
+#define PPC47x_TLB0_256M	0x000001f0
+#define PPC47x_TLB0_1G		0x000003f0
+#define PPC47x_TLB0_BOLTED_R	0x00000008	/* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK	0xfffff000      /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK	0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK	0x0003ff80
+#define PPC47x_TLB2_IL1I	0x00020000      /* Inhibit L1 instruction cache */
+#define PPC47x_TLB2_IL1D	0x00010000      /* Inhibit L1 data cache */
+#define PPC47x_TLB2_U0		0x00008000      /* User 0 */
+#define PPC47x_TLB2_U1		0x00004000      /* User 1 */
+#define PPC47x_TLB2_U2		0x00002000      /* User 2 */
+#define PPC47x_TLB2_U3		0x00001000      /* User 3 */
+#define PPC47x_TLB2_W		0x00000800      /* Caching is write-through */
+#define PPC47x_TLB2_I		0x00000400      /* Caching is inhibited */
+#define PPC47x_TLB2_M		0x00000200      /* Memory is coherent */
+#define PPC47x_TLB2_G		0x00000100      /* Memory is guarded */
+#define PPC47x_TLB2_E		0x00000080      /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK	0x0000003f
+#define PPC47x_TLB2_UX		0x00000020      /* User execution */
+#define PPC47x_TLB2_UW		0x00000010      /* User write */
+#define PPC47x_TLB2_UR		0x00000008      /* User read */
+#define PPC47x_TLB2_SX		0x00000004      /* Super execution */
+#define PPC47x_TLB2_SW		0x00000002      /* Super write */
+#define PPC47x_TLB2_SR		0x00000001      /* Super read */
+#define PPC47x_TLB2_U_RWX	(PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX	(PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW	(PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG		(PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
+#ifndef __ASSEMBLY__
+
+extern unsigned int tlb_44x_hwater;
+extern unsigned int tlb_44x_index;
+
+typedef struct {
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_44x
+#define PPC44x_EARLY_TLBS	1
+#else
+#define PPC44x_EARLY_TLBS	2
+#define PPC44x_EARLY_DEBUG_VIRTADDR	(ASM_CONST(0xf0000000) \
+	| (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff))
+#endif
+
+/* Size of the TLBs used for pinning in lowmem */
+#define PPC_PIN_SIZE	(1 << 28)	/* 256M */
+
+#if defined(CONFIG_PPC_4K_PAGES)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_4K
+#define mmu_virtual_psize	MMU_PAGE_4K
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_16K
+#define mmu_virtual_psize	MMU_PAGE_16K
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_64K
+#define mmu_virtual_psize	MMU_PAGE_64K
+#elif defined(CONFIG_PPC_256K_PAGES)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
+#define mmu_virtual_psize	MMU_PAGE_256K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define mmu_linear_psize	MMU_PAGE_256M
+
+#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT	(32 - PTE_T_LOG2 - PTE_SHIFT)
+
+#endif /* _ASM_POWERPC_MMU_44X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
new file mode 100644
index 0000000..193f531
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_8XX_H_
+#define _ASM_POWERPC_MMU_8XX_H_
+/*
+ * PPC8xx support
+ */
+
+/* Control/status registers for the MPC8xx.
+ * A write operation to these registers causes serialized access.
+ * During software tablewalk, the registers perform mask/shift-add
+ * operations as they are written and read.  A TLB entry is created
+ * when the Mx_RPN is written, and the contents of several registers
+ * are used to create the entry.
+ */
+#define SPRN_MI_CTR	784	/* Instruction TLB control register */
+#define MI_GPM		0x80000000	/* Set domain manager mode */
+#define MI_PPM		0x40000000	/* Set subpage protection */
+#define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
+#define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
+#define MI_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
+#define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */
+#define MI_RESETVAL	0x00000000	/* Value of register at reset */
+
+/* These are the Ks and Kp from the PowerPC books.  For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MI_AP	786
+#define MI_Ks		0x80000000	/* Should not be set */
+#define MI_Kp		0x40000000	/* Should always be set */
+
+/*
+ * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
+ * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
+ * respectively NA for All or X for Supervisor and no access for User.
+ * Then we use the APG to say whether accesses are according to Page rules or
+ * "all Supervisor" rules (Access to all)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * We define all 16 groups so that all other bits of APG can take any value
+ */
+#define MI_APG_INIT	0x44444444
+
+/* The effective page number register.  When read, contains the information
+ * about the last instruction TLB miss.  When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MI_EPN	787
+#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
+#define MI_EVALID	0x00000200	/* Entry is valid */
+#define MI_ASIDMASK	0x0000000f	/* ASID match value */
+					/* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define SPRN_MI_TWC	789
+#define MI_APG		0x000001e0	/* Access protection group (0) */
+#define MI_GUARDED	0x00000010	/* Guarded storage */
+#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
+#define MI_PS8MEG	0x0000000c	/* 8M page size */
+#define MI_PS512K	0x00000004	/* 512K page size */
+#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
+#define MI_SVALID	0x00000001	/* Segment entry is valid */
+					/* Reset value is undefined */
+
+/* Real page number.  Defined by the pte.  Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define SPRN_MI_RPN	790
+#define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */
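
A sketch of the load sequence these comments describe, using mtspr()
from asm/reg.h (values illustrative; on real hardware this runs from
the TLB-miss handler):

    mtspr(SPRN_MI_EPN, (ea & MI_EPNMASK) | MI_EVALID); /* effective page */
    mtspr(SPRN_MI_TWC, MI_PS8MEG | MI_SVALID);         /* level-1 bits */
    mtspr(SPRN_MI_RPN, pte);      /* this write creates the ITLB entry */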
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization.  This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT	0x000001fd
+
+#define SPRN_MD_CTR	792	/* Data TLB control register */
+#define MD_GPM		0x80000000	/* Set domain manager mode */
+#define MD_PPM		0x40000000	/* Set subpage protection */
+#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
+#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
+#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
+#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
+#define MD_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
+#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */
+#define MD_RESETVAL	0x04000000	/* Value of register at reset */
+
+#define SPRN_M_CASID	793	/* Address space ID (context) to match */
+#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books.  For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MD_AP	794
+#define MD_Ks		0x80000000	/* Should not be set */
+#define MD_Kp		0x40000000	/* Should always be set */
+
+/*
+ * All pages' PP data bits are set to either 000, 011 or 001, which means
+ * respectively RW for supervisor and no access for user, RO for supervisor
+ * and no access for user, or no access for all.
+ * Then we use the APG to say whether accesses are according to page rules or
+ * "all supervisor" rules (access to all).
+ * Therefore, we define 2 APG groups; the lsb is _PMD_USER:
+ * 0 => no user => 01 (all accesses performed according to page definition)
+ * 1 => user => 00 (all accesses performed as supervisor in accordance with
+ *                  the page definition)
+ * We define all 16 groups so that all other bits of APG can take any value.
+ */
+#define MD_APG_INIT	0x44444444
+
+/* The effective page number register.  When read, contains the information
+ * about the last data TLB miss.  When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MD_EPN	795
+#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
+#define MD_EVALID	0x00000200	/* Entry is valid */
+#define MD_ASIDMASK	0x0000000f	/* ASID match value */
+					/* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define SPRN_M_TWB	796
+#define	M_L1TB		0xfffff000	/* Level 1 table base address */
+#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
+					/* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written.  It also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define SPRN_MD_TWC	797
+#define MD_L2TB		0xfffff000	/* Level 2 table base address */
+#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
+#define MD_APG		0x000001e0	/* Access protection group (0) */
+#define MD_GUARDED	0x00000010	/* Guarded storage */
+#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
+#define MD_PS8MEG	0x0000000c	/* 8M page size */
+#define MD_PS512K	0x00000004	/* 512K page size */
+#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
+#define MD_WT		0x00000002	/* Use writethrough page attribute */
+#define MD_SVALID	0x00000001	/* Segment entry is valid */
+					/* Reset value is undefined */
+
+
+/* Real page number.  Defined by the pte.  Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define SPRN_MD_RPN	798
+#define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define SPRN_M_TW	799
+
+#ifdef CONFIG_PPC_MM_SLICES
+#include <asm/nohash/32/slice.h>
+#define SLICE_ARRAY_SIZE	(1 << (32 - SLICE_LOW_SHIFT - 1))
+#endif
+
+#ifndef __ASSEMBLY__
+struct slice_mask {
+	u64 low_slices;
+	DECLARE_BITMAP(high_slices, 0);
+};
+
+typedef struct {
+	unsigned int id;
+	unsigned int active;
+	unsigned long vdso_base;
+#ifdef CONFIG_PPC_MM_SLICES
+	u16 user_psize;		/* page size index */
+	unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
+	unsigned char high_slices_psize[0];
+	unsigned long slb_addr_limit;
+	struct slice_mask mask_base_psize; /* 4k or 16k */
+# ifdef CONFIG_HUGETLB_PAGE
+	struct slice_mask mask_512k;
+	struct slice_mask mask_8m;
+# endif
+#endif
+} mm_context_t;
+
+#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    enc   : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+	unsigned int    ind;    /* Corresponding indirect page size shift */
+	unsigned int	flags;
+#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_4K
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_16K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define mmu_linear_psize	MMU_PAGE_8M
+
+#endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
new file mode 100644
index 0000000..e200729
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_BOOK3E_H_
+#define _ASM_POWERPC_MMU_BOOK3E_H_
+/*
+ * Freescale Book-E/Book-3e (ISA 2.06+) MMU support
+ */
+
+/* Book-3e defined page sizes */
+#define BOOK3E_PAGESZ_1K	0
+#define BOOK3E_PAGESZ_2K	1
+#define BOOK3E_PAGESZ_4K	2
+#define BOOK3E_PAGESZ_8K	3
+#define BOOK3E_PAGESZ_16K	4
+#define BOOK3E_PAGESZ_32K	5
+#define BOOK3E_PAGESZ_64K	6
+#define BOOK3E_PAGESZ_128K	7
+#define BOOK3E_PAGESZ_256K	8
+#define BOOK3E_PAGESZ_512K	9
+#define BOOK3E_PAGESZ_1M	10
+#define BOOK3E_PAGESZ_2M	11
+#define BOOK3E_PAGESZ_4M	12
+#define BOOK3E_PAGESZ_8M	13
+#define BOOK3E_PAGESZ_16M	14
+#define BOOK3E_PAGESZ_32M	15
+#define BOOK3E_PAGESZ_64M	16
+#define BOOK3E_PAGESZ_128M	17
+#define BOOK3E_PAGESZ_256M	18
+#define BOOK3E_PAGESZ_512M	19
+#define BOOK3E_PAGESZ_1GB	20
+#define BOOK3E_PAGESZ_2GB	21
+#define BOOK3E_PAGESZ_4GB	22
+#define BOOK3E_PAGESZ_8GB	23
+#define BOOK3E_PAGESZ_16GB	24
+#define BOOK3E_PAGESZ_32GB	25
+#define BOOK3E_PAGESZ_64GB	26
+#define BOOK3E_PAGESZ_128GB	27
+#define BOOK3E_PAGESZ_256GB	28
+#define BOOK3E_PAGESZ_512GB	29
+#define BOOK3E_PAGESZ_1TB	30
+#define BOOK3E_PAGESZ_2TB	31
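+
+/* Editor's note (illustrative, not in the original header): the encodings
+ * above are simply log2 of the page size in KB, so a hypothetical helper
+ * could recover the byte size like this (64-bit math assumed for the
+ * largest encodings).  Guarded out; shown for documentation only.
+ */
+#if 0
+static inline unsigned long long book3e_pagesz_to_bytes(int pagesz)
+{
+	/* BOOK3E_PAGESZ_4K == 2 -> 1024 << 2 == 4096, etc. */
+	return 1024ULL << pagesz;
+}
+#endif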
+
+/* MAS registers bit definitions */
+
+#define MAS0_TLBSEL_MASK	0x30000000
+#define MAS0_TLBSEL_SHIFT	28
+#define MAS0_TLBSEL(x)		(((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)	(((mas0) & MAS0_TLBSEL_MASK) >> \
+			MAS0_TLBSEL_SHIFT)
+#define MAS0_ESEL_MASK		0x0FFF0000
+#define MAS0_ESEL_SHIFT		16
+#define MAS0_ESEL(x)		(((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
+#define MAS0_NV(x)		((x) & 0x00000FFF)
+#define MAS0_HES		0x00004000
+#define MAS0_WQ_ALLWAYS		0x00000000
+#define MAS0_WQ_COND		0x00001000
+#define MAS0_WQ_CLR_RSRV       	0x00002000
+
+#define MAS1_VALID		0x80000000
+#define MAS1_IPROT		0x40000000
+#define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
+#define MAS1_IND		0x00002000
+#define MAS1_TS			0x00001000
+#define MAS1_TSIZE_MASK		0x00000f80
+#define MAS1_TSIZE_SHIFT	7
+#define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)	(((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
+
+#define MAS2_EPN		(~0xFFFUL)
+#define MAS2_X0			0x00000040
+#define MAS2_X1			0x00000020
+#define MAS2_W			0x00000010
+#define MAS2_I			0x00000008
+#define MAS2_M			0x00000004
+#define MAS2_G			0x00000002
+#define MAS2_E			0x00000001
+#define MAS2_WIMGE_MASK		0x0000001f
+#define MAS2_EPN_MASK(size)		(~0 << (size + 10))
+#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
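+
+/* Editor's illustrative sketch (hypothetical, guarded out): composing MAS
+ * register values for a bolted, coherent 16M TLB1 mapping with the macros
+ * above.  Writing them to the MAS SPRs and issuing tlbwe is omitted.
+ */
+#if 0
+static inline void book3e_mas_example(unsigned int *mas0, unsigned int *mas1,
+				      unsigned int *mas2)
+{
+	*mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
+	*mas1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_16M);
+	/* 16M-aligned EPN plus the M (coherent) attribute */
+	*mas2 = MAS2_VAL(0xc0000000, BOOK3E_PAGESZ_16M, MAS2_M);
+}
+#endif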
+
+#define MAS3_RPN		0xFFFFF000
+#define MAS3_U0			0x00000200
+#define MAS3_U1			0x00000100
+#define MAS3_U2			0x00000080
+#define MAS3_U3			0x00000040
+#define MAS3_UX			0x00000020
+#define MAS3_SX			0x00000010
+#define MAS3_UW			0x00000008
+#define MAS3_SW			0x00000004
+#define MAS3_UR			0x00000002
+#define MAS3_SR			0x00000001
+#define MAS3_BAP_MASK		0x0000003f
+#define MAS3_SPSIZE		0x0000003e
+#define MAS3_SPSIZE_SHIFT	1
+
+#define MAS4_TLBSEL_MASK	MAS0_TLBSEL_MASK
+#define MAS4_TLBSELD(x) 	MAS0_TLBSEL(x)
+#define MAS4_INDD		0x00008000	/* Default IND */
+#define MAS4_TSIZED(x)		MAS1_TSIZE(x)
+#define MAS4_X0D		0x00000040
+#define MAS4_X1D		0x00000020
+#define MAS4_WD			0x00000010
+#define MAS4_ID			0x00000008
+#define MAS4_MD			0x00000004
+#define MAS4_GD			0x00000002
+#define MAS4_ED			0x00000001
+#define MAS4_WIMGED_MASK	0x0000001f	/* Default WIMGE */
+#define MAS4_WIMGED_SHIFT	0
+#define MAS4_VLED		MAS4_X1D	/* Default VLE */
+#define MAS4_ACMD		0x000000c0	/* Default ACM */
+#define MAS4_ACMD_SHIFT		6
+#define MAS4_TSIZED_MASK	0x00000f80	/* Default TSIZE */
+#define MAS4_TSIZED_SHIFT	7
+
+#define MAS5_SGS		0x80000000
+
+#define MAS6_SPID0		0x3FFF0000
+#define MAS6_SPID1		0x00007FFE
+#define MAS6_ISIZE(x)		MAS1_TSIZE(x)
+#define MAS6_SAS		0x00000001
+#define MAS6_SPID		MAS6_SPID0
+#define MAS6_SIND 		0x00000002	/* Indirect page */
+#define MAS6_SIND_SHIFT		1
+#define MAS6_SPID_MASK		0x3fff0000
+#define MAS6_SPID_SHIFT		16
+#define MAS6_ISIZE_MASK		0x00000f80
+#define MAS6_ISIZE_SHIFT	7
+
+#define MAS7_RPN		0xFFFFFFFF
+
+#define MAS8_TGS		0x80000000 /* Guest space */
+#define MAS8_VF			0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID		0x000000ff
+
+/* Bit definitions for MMUCFG */
+#define MMUCFG_MAVN	0x00000003	/* MMU Architecture Version Number */
+#define MMUCFG_MAVN_V1	0x00000000	/* v1.0 */
+#define MMUCFG_MAVN_V2	0x00000001	/* v2.0 */
+#define MMUCFG_NTLBS	0x0000000c	/* Number of TLBs */
+#define MMUCFG_PIDSIZE	0x000007c0	/* PID Reg Size */
+#define MMUCFG_TWC	0x00008000	/* TLB Write Conditional (v2.0) */
+#define MMUCFG_LRAT	0x00010000	/* LRAT Supported (v2.0) */
+#define MMUCFG_RASIZE	0x00fe0000	/* Real Addr Size */
+#define MMUCFG_LPIDSIZE	0x0f000000	/* LPID Reg Size */
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI	0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI	0x00000020	/* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS	0x00000780	/* TLB0 Page Size */
+#define MMUCSR0_TLB1PS	0x00007800	/* TLB1 Page Size */
+#define MMUCSR0_TLB2PS	0x00078000	/* TLB2 Page Size */
+#define MMUCSR0_TLB3PS	0x00780000	/* TLB3 Page Size */
+
+/* MMUCFG bits */
+#define MMUCFG_MAVN_NASK	0x00000003
+#define MMUCFG_MAVN_V1_0	0x00000000
+#define MMUCFG_MAVN_V2_0	0x00000001
+#define MMUCFG_NTLB_MASK	0x0000000c
+#define MMUCFG_NTLB_SHIFT	2
+#define MMUCFG_PIDSIZE_MASK	0x000007c0
+#define MMUCFG_PIDSIZE_SHIFT	6
+#define MMUCFG_TWC		0x00008000
+#define MMUCFG_LRAT		0x00010000
+#define MMUCFG_RASIZE_MASK	0x00fe0000
+#define MMUCFG_RASIZE_SHIFT	17
+#define MMUCFG_LPIDSIZE_MASK	0x0f000000
+#define MMUCFG_LPIDSIZE_SHIFT	24
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY		0x00000fff	/* number of entries */
+#define TLBnCFG_HES		0x00002000	/* HW select supported */
+#define TLBnCFG_IPROT		0x00008000	/* IPROT supported */
+#define TLBnCFG_GTWE		0x00010000	/* Guest can write */
+#define TLBnCFG_IND		0x00020000	/* IND entries supported */
+#define TLBnCFG_PT		0x00040000	/* Can load from page table */
+#define TLBnCFG_MINSIZE		0x00f00000	/* Minimum Page Size (v1.0) */
+#define TLBnCFG_MINSIZE_SHIFT	20
+#define TLBnCFG_MAXSIZE		0x000f0000	/* Maximum Page Size (v1.0) */
+#define TLBnCFG_MAXSIZE_SHIFT	16
+#define TLBnCFG_ASSOC		0xff000000	/* Associativity */
+#define TLBnCFG_ASSOC_SHIFT	24
+
+/* TLBnPS encoding */
+#define TLBnPS_4K		0x00000004
+#define TLBnPS_8K		0x00000008
+#define TLBnPS_16K		0x00000010
+#define TLBnPS_32K		0x00000020
+#define TLBnPS_64K		0x00000040
+#define TLBnPS_128K		0x00000080
+#define TLBnPS_256K		0x00000100
+#define TLBnPS_512K		0x00000200
+#define TLBnPS_1M 		0x00000400
+#define TLBnPS_2M 		0x00000800
+#define TLBnPS_4M 		0x00001000
+#define TLBnPS_8M 		0x00002000
+#define TLBnPS_16M		0x00004000
+#define TLBnPS_32M		0x00008000
+#define TLBnPS_64M		0x00010000
+#define TLBnPS_128M		0x00020000
+#define TLBnPS_256M		0x00040000
+#define TLBnPS_512M		0x00080000
+#define TLBnPS_1G		0x00100000
+#define TLBnPS_2G		0x00200000
+#define TLBnPS_4G		0x00400000
+#define TLBnPS_8G		0x00800000
+#define TLBnPS_16G		0x01000000
+#define TLBnPS_32G		0x02000000
+#define TLBnPS_64G		0x04000000
+#define TLBnPS_128G		0x08000000
+#define TLBnPS_256G		0x10000000
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL			0
+#define TLBILX_T_TID			1
+#define TLBILX_T_FULLMATCH		3
+#define TLBILX_T_CLASS0			4
+#define TLBILX_T_CLASS1			5
+#define TLBILX_T_CLASS2			6
+#define TLBILX_T_CLASS3			7
+
+#ifndef __ASSEMBLY__
+#include <asm/bug.h>
+
+extern unsigned int tlbcam_index;
+
+typedef struct {
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
+} mm_context_t;
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    enc   : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def
+{
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+	unsigned int    ind;    /* Corresponding indirect page size shift */
+	unsigned int	flags;
+#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
+/* The page sizes use the same names as 64-bit hash, but here they are
+ * compile-time constants.
+ */
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_4K
+#else
+#error Unsupported page size
+#endif
+
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
+
+struct tlb_core_data {
+	/*
+	 * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+	 * Must be the first struct element.
+	 */
+	u8 lock;
+
+	/* For software way selection, as on Freescale TLB1 */
+	u8 esel_next, esel_max, esel_first;
+};
+
+#ifdef CONFIG_PPC64
+extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE	0
+#define PPC_HTW_IBM	1
+#define PPC_HTW_E6500	2
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
+
+#define mmu_cleanup_all NULL
+
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
new file mode 100644
index 0000000..13ea441
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+#include <asm/asm-const.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * MMU families
+ */
+#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
+
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)
+
+/*
+ * Individual features below.
+ */
+
+/*
+ * Support for 68-bit VA space. We added that from ISA 2.05.
+ */
+#define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)
+/*
+ * Kernel read only support.
+ * We added the ppp value 0b110 in ISA 2.04.
+ */
+#define MMU_FTR_KERNEL_RO		ASM_CONST(0x00004000)
+
+/*
+ * We need to clear the top 16 bits of the VA (from the remaining 64 bits)
+ * in tlbie* instructions.
+ */
+#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on 32-bit processor, only used
+ * by CONFIG_6xx currently as BookE supports that from day 1
+ */
+#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate instructions.
+ */
+#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)
+
+/* This indicates that the processor doesn't handle way selection
+ * properly and needs SW to track and update the LRU state.  This
+ * is specific to an erratum on e300c2/c3/c4 class parts.
+ */
+#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)
+
+/* Enable use of TLB reservation.  Processor should support tlbsrx.
+ * instruction and MAS0[WQ].
+ */
+#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)
+
+/* Use paired MAS registers (MAS7||MAS3, etc.)
+ */
+#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)
+
+/* Doesn't support the B bit (1T segment) in SLBIE
+ */
+#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)
+
+/* Support 16M large pages
+ */
+#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)
+
+/* Supports TLBIEL variant
+ */
+#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)
+
+/* Supports tlbies w/o locking
+ */
+#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)
+
+/* Large pages can be marked CI
+ */
+#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)
+
+/* 1T segments available
+ */
+#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
+
+/* MMU feature bit sets for various CPUs */
+#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
+	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
+#define MMU_FTRS_POWER		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
+#define MMU_FTRS_PPC970		MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
+#define MMU_FTRS_POWER5		MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
+#define MMU_FTRS_POWER7		MMU_FTRS_POWER6
+#define MMU_FTRS_POWER8		MMU_FTRS_POWER6
+#define MMU_FTRS_POWER9		MMU_FTRS_POWER6
+#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+				MMU_FTR_CI_LARGE_PAGE
+#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
+#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <asm/cputable.h>
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+#endif
+
+enum {
+	MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
+		MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
+		MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
+		MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
+		MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
+		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
+		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
+		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
+		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
+		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
+#ifdef CONFIG_PPC_RADIX_MMU
+		MMU_FTR_TYPE_RADIX |
+#endif
+		0,
+};
+
+static inline bool early_mmu_has_feature(unsigned long feature)
+{
+	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS	32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+	int i;
+
+#ifndef __clang__ /* clang can't cope with this */
+	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return early_mmu_has_feature(feature);
+	}
+#endif
+
+	if (!(MMU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&mmu_feature_keys[i]);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+	int i;
+
+	i = __builtin_ctzl(feature);
+	cur_cpu_spec->mmu_features &= ~feature;
+	static_branch_disable(&mmu_feature_keys[i]);
+}
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
+}
+
+static inline bool mmu_has_feature(unsigned long feature)
+{
+	return early_mmu_has_feature(feature);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+	cur_cpu_spec->mmu_features &= ~feature;
+}
+#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
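+
+/* Editor's sketch of a typical guarded fast path (the callee name is
+ * hypothetical); radix_enabled() further down is implemented exactly
+ * this way:
+ *
+ *	if (mmu_has_feature(MMU_FTR_TYPE_RADIX))
+ *		do_radix_specific_flush(mm);
+ */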
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server; on embedded, we
+ * make it match the size of our bolted TLB area.
+ */
+extern u64 ppc64_rma_size;
+
+/* Cleanup function used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+					  unsigned long dw1);
+#endif /* CONFIG_PPC64 */
+
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+	return false;
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return false;
+}
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address);
+#else
+static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MEM_KEYS */
+
+#endif /* !__ASSEMBLY__ */
+
+/* The kernel uses the constants below to index into the page sizes array.
+ * Using fixed constants for this purpose is better for the performance
+ * of the low level hash refill handlers.
+ *
+ * An unsupported page size has a "shift" field set to 0.
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable
+ *
+ * Note: This array ended up being a false good idea as it's growing to the
+ * point where I wonder if we should replace it with something different,
+ * to think about, feedback welcome. --BenH.
+ */
+
+/* These are #defines as they have to be used in assembly */
+#define MMU_PAGE_4K	0
+#define MMU_PAGE_16K	1
+#define MMU_PAGE_64K	2
+#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
+#define MMU_PAGE_256K	4
+#define MMU_PAGE_512K	5
+#define MMU_PAGE_1M	6
+#define MMU_PAGE_2M	7
+#define MMU_PAGE_4M	8
+#define MMU_PAGE_8M	9
+#define MMU_PAGE_16M	10
+#define MMU_PAGE_64M	11
+#define MMU_PAGE_256M	12
+#define MMU_PAGE_1G	13
+#define MMU_PAGE_16G	14
+#define MMU_PAGE_64G	15
+
+/*
+ * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16.
+ * We also need to change the type of mm_context.low/high_slices_psize.
+ */
+#define MMU_PAGE_COUNT	16
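+
+/* Editor's note: for example, on a 4K-page kernel mmu_psize_defs[MMU_PAGE_4K]
+ * has .shift == 12, and the shift_to_mmu_psize()/mmu_psize_to_shift()
+ * helpers defined by the per-platform headers translate between the two.
+ */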
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/mmu.h>
+#else /* CONFIG_PPC_BOOK3S_64 */
+
+#ifndef __ASSEMBLY__
+/* MMU initialization */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				       phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
+#endif /* __ASSEMBLY__ */
+#endif
+
+#if defined(CONFIG_PPC_STD_MMU_32)
+/* 32-bit classic hash table MMU */
+#include <asm/book3s/32/mmu-hash.h>
+#elif defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+#  include <asm/mmu-40x.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+#  include <asm/mmu-44x.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#  include <asm/mmu-book3e.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+#  include <asm/mmu-8xx.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
new file mode 100644
index 0000000..b694d6a
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm/cputhreads.h>
+
+/*
+ * Most of the context management is out of line.
+ */
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+struct mm_iommu_table_group_mem_t;
+
+extern int isolate_lru_page(struct page *page);	/* from internal.h */
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
+		struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
+		struct mm_struct *mm, unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
+extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
+extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
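+
+/* Editor's sketch of the intended call flow (guarded out; return-value
+ * conventions assumed to be 0 on success, error handling elided):
+ * preregister a chunk of userspace, translate, then drop the reference.
+ */
+#if 0
+	struct mm_iommu_table_group_mem_t *mem;
+	unsigned long hpa;
+
+	if (!mm_iommu_get(mm, ua, entries, &mem)) {
+		if (!mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa))
+			use_hpa(hpa);		/* hypothetical consumer */
+		mm_iommu_put(mm, mem);
+	}
+#endif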
+#endif
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+extern void radix__switch_mmu_context(struct mm_struct *prev,
+				      struct mm_struct *next);
+static inline void switch_mmu_context(struct mm_struct *prev,
+				      struct mm_struct *next,
+				      struct task_struct *tsk)
+{
+	if (radix_enabled())
+		return radix__switch_mmu_context(prev, next);
+	return switch_slb(tsk, next);
+}
+
+extern int hash__alloc_context_id(void);
+extern void hash__reserve_context_id(int id);
+extern void __destroy_context(int context_id);
+static inline void mmu_context_init(void) { }
+
+static inline int alloc_extended_context(struct mm_struct *mm,
+					 unsigned long ea)
+{
+	int context_id;
+
+	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
+
+	context_id = hash__alloc_context_id();
+	if (context_id < 0)
+		return context_id;
+
+	VM_WARN_ON(mm->context.extended_id[index]);
+	mm->context.extended_id[index] = context_id;
+	return context_id;
+}
+
+static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
+{
+	int context_id;
+
+	context_id = get_ea_context(&mm->context, ea);
+	if (!context_id)
+		return true;
+	return false;
+}
+
+#else
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
+			       struct task_struct *tsk);
+extern unsigned long __init_new_context(void);
+extern void __destroy_context(unsigned long context_id);
+extern void mmu_context_init(void);
+static inline int alloc_extended_context(struct mm_struct *mm,
+					 unsigned long ea)
+{
+	/* non-book3s_64 platforms should never end up here */
+	WARN_ON(1);
+	return -ENOMEM;
+}
+
+static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
+{
+	return false;
+}
+#endif
+
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
+#else
+static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
+#endif
+
+extern void switch_cop(struct mm_struct *next);
+extern int use_cop(unsigned long acop, struct mm_struct *mm);
+extern void drop_cop(unsigned long acop, struct mm_struct *mm);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void inc_mm_active_cpus(struct mm_struct *mm)
+{
+	atomic_inc(&mm->context.active_cpus);
+}
+
+static inline void dec_mm_active_cpus(struct mm_struct *mm)
+{
+	atomic_dec(&mm->context.active_cpus);
+}
+
+static inline void mm_context_add_copro(struct mm_struct *mm)
+{
+	/*
+	 * If any copro is in use, increment the active CPU count
+	 * in order to force TLB invalidations to be global so that
+	 * they propagate to the Nest MMU.
+	 */
+	if (atomic_inc_return(&mm->context.copros) == 1)
+		inc_mm_active_cpus(mm);
+}
+
+static inline void mm_context_remove_copro(struct mm_struct *mm)
+{
+	int c;
+
+	/*
+	 * When removing the last copro, we need to broadcast a global
+	 * flush of the full mm, as the next TLBI may be local and the
+	 * nMMU and/or PSL need to be cleaned up.
+	 *
+	 * Both the 'copros' and 'active_cpus' counts are looked at in
+	 * flush_all_mm() to determine the scope (local/global) of the
+	 * TLBIs, so we need to flush first before decrementing
+	 * 'copros'. If this API is used by several callers for the
+	 * same context, it can lead to over-flushing. It's hopefully
+	 * not common enough to be a problem.
+	 *
+	 * Skip on hash, as we don't know how to do the proper flush
+	 * for the time being. Invalidations will remain global if
+	 * used on hash. Note that we can't drop 'copros' either, as
+	 * it could make some invalidations local with no flush
+	 * in-between.
+	 */
+	if (radix_enabled()) {
+		flush_all_mm(mm);
+
+		c = atomic_dec_if_positive(&mm->context.copros);
+		/* Detect imbalance between add and remove */
+		WARN_ON(c < 0);
+
+		if (c == 0)
+			dec_mm_active_cpus(mm);
+	}
+}
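+
+/* Editor's sketch: the expected pairing around coprocessor use (the
+ * middle step is hypothetical):
+ *
+ *	mm_context_add_copro(mm);
+ *	... program the coprocessor / nMMU with this context ...
+ *	mm_context_remove_copro(mm);
+ */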
+#else
+static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
+static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
+static inline void mm_context_add_copro(struct mm_struct *mm) { }
+static inline void mm_context_remove_copro(struct mm_struct *mm) { }
+#endif
+
+
+extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+			       struct task_struct *tsk);
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	switch_mm(prev, next, current);
+}
+
+/* We don't currently use enter_lazy_tlb() for anything */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+				  struct task_struct *tsk)
+{
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = NULL;
+#endif
+}
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
+{
+	return 0;
+}
+
+#ifndef CONFIG_PPC_BOOK3S_64
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+#else
+extern void arch_exit_mmap(struct mm_struct *mm);
+#endif
+
+static inline void arch_unmap(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+		mm->context.vdso_base = 0;
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
+#ifdef CONFIG_PPC_MEM_KEYS
+bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
+			       bool execute, bool foreign);
+#else /* CONFIG_PPC_MEM_KEYS */
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+		bool write, bool execute, bool foreign)
+{
+	/* by default, allow everything */
+	return true;
+}
+
+#define pkey_mm_init(mm)
+#define thread_pkey_regs_save(thread)
+#define thread_pkey_regs_restore(new_thread, old_thread)
+#define thread_pkey_regs_init(thread)
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+{
+	return 0x0UL;
+}
+
+#endif /* CONFIG_PPC_MEM_KEYS */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
new file mode 100644
index 0000000..91c69ff
--- /dev/null
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
+ *
+ * PowerPC64 port:
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+#ifdef __KERNEL__
+
+#include <linux/cpumask.h>
+
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the
+ *    flags field of the struct page
+ */
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
+extern struct pglist_data *node_data[];
+/*
+ * Return a pointer to the node data for node n.
+ */
+#define NODE_DATA(nid)		(node_data[nid])
+
+/*
+ * Following are specific to this numa platform.
+ */
+
+extern int numa_cpu_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
+#endif
+
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#ifdef CONFIG_FA_DUMP
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
new file mode 100644
index 0000000..d61b081
--- /dev/null
+++ b/arch/powerpc/include/asm/module.h
@@ -0,0 +1,111 @@
+#ifndef _ASM_POWERPC_MODULE_H
+#define _ASM_POWERPC_MODULE_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <asm/bug.h>
+#include <asm-generic/module.h>
+
+
+#ifdef CONFIG_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC_FTRACE	"mprofile-kernel "
+#else
+#define MODULE_ARCH_VERMAGIC_FTRACE	""
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE	"relocatable "
+#else
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE	""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
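+
+/* Editor's note: with both options enabled this contributes
+ * "mprofile-kernel relocatable " to the module vermagic string, so modules
+ * built with a mismatched configuration are refused at load time.
+ */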
+
+#ifndef __powerpc64__
+/*
+ * Thanks to Paul M for explaining this.
+ *
+ * PPC can only do relative jumps within +/- 32MB, and often the kernel
+ * and other modules are further away than this.  So, we jump to a table
+ * of trampolines attached to the module (the Procedure Linkage Table)
+ * whenever that happens.
+ */
+
+struct ppc_plt_entry {
+	/* 16 byte jump instruction sequence (4 instructions) */
+	unsigned int jump[4];
+};
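+
+/* Editor's illustrative sketch of the kind of 4-instruction sequence the
+ * module loader writes into a PLT entry (the scratch register and exact
+ * encodings vary; treat as an example, not a contract):
+ *
+ *	lis   rN,target@ha
+ *	addi  rN,rN,target@l
+ *	mtctr rN
+ *	bctr
+ */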
+#endif	/* __powerpc64__ */
+
+
+struct mod_arch_specific {
+#ifdef __powerpc64__
+	unsigned int stubs_section;	/* Index of stubs section in module */
+	unsigned int toc_section;	/* What section is the TOC? */
+	bool toc_fixed;			/* Have we fixed up .TOC.? */
+
+	/* For module function descriptor dereference */
+	unsigned long start_opd;
+	unsigned long end_opd;
+#else /* powerpc64 */
+	/* Indices of PLT sections within module. */
+	unsigned int core_plt_section;
+	unsigned int init_plt_section;
+#endif /* powerpc64 */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long tramp;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	unsigned long tramp_regs;
+#endif
+#endif
+
+	/* List of BUG addresses, source line numbers and filenames */
+	struct list_head bug_list;
+	struct bug_entry *bug_table;
+	unsigned int num_bugs;
+};
+
+/*
+ * Select ELF headers.
+ * Make empty sections for module_frob_arch_sections to expand.
+ */
+
+#ifdef __powerpc64__
+#    ifdef MODULE
+	asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
+#    endif
+#else
+#    ifdef MODULE
+	asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
+	asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
+#    endif	/* MODULE */
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#    ifdef MODULE
+	asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+#    endif	/* MODULE */
+#endif
+
+int module_trampoline_target(struct module *mod, unsigned long trampoline,
+			     unsigned long *target);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
+#else
+static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
+{
+	return 0;
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
new file mode 100644
index 0000000..deaeb0b
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -0,0 +1,122 @@
+/*
+ * MPC5121 Prototypes and definitions
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.
+ */
+
+#ifndef __ASM_POWERPC_MPC5121_H__
+#define __ASM_POWERPC_MPC5121_H__
+
+/* MPC512x Reset module registers */
+struct mpc512x_reset_module {
+	u32	rcwlr;	/* Reset Configuration Word Low Register */
+	u32	rcwhr;	/* Reset Configuration Word High Register */
+	u32	reserved1;
+	u32	reserved2;
+	u32	rsr;	/* Reset Status Register */
+	u32	rmr;	/* Reset Mode Register */
+	u32	rpr;	/* Reset Protection Register */
+	u32	rcr;	/* Reset Control Register */
+	u32	rcer;	/* Reset Control Enable Register */
+};
+
+/*
+ * Clock Control Module
+ */
+struct mpc512x_ccm {
+	u32	spmr;	/* System PLL Mode Register */
+	u32	sccr1;	/* System Clock Control Register 1 */
+	u32	sccr2;	/* System Clock Control Register 2 */
+	u32	scfr1;	/* System Clock Frequency Register 1 */
+	u32	scfr2;	/* System Clock Frequency Register 2 */
+	u32	scfr2s;	/* System Clock Frequency Shadow Register 2 */
+	u32	bcr;	/* Bread Crumb Register */
+	u32	psc_ccr[12];	/* PSC Clock Control Registers */
+	u32	spccr;	/* SPDIF Clock Control Register */
+	u32	cccr;	/* CFM Clock Control Register */
+	u32	dccr;	/* DIU Clock Control Register */
+	u32	mscan_ccr[4];	/* MSCAN Clock Control Registers */
+	u32	out_ccr[4];	/* OUT CLK Configure Registers */
+	u32	rsv0[2];	/* Reserved */
+	u32	scfr3;		/* System Clock Frequency Register 3 */
+	u32	rsv1[3];	/* Reserved */
+	u32	spll_lock_cnt;	/* System PLL Lock Counter */
+	u8	res[0x6c];	/* Reserved */
+};
+
+/*
+ * LPC Module
+ */
+struct mpc512x_lpc {
+	u32	cs_cfg[8];	/* CS config */
+	u32	cs_ctrl;	/* CS Control Register */
+	u32	cs_status;	/* CS Status Register */
+	u32	burst_ctrl;	/* CS Burst Control Register */
+	u32	deadcycle_ctrl;	/* CS Deadcycle Control Register */
+	u32	holdcycle_ctrl;	/* CS Holdcycle Control Register */
+	u32	alt;		/* Address Latch Timing Register */
+};
+
+int mpc512x_cs_config(unsigned int cs, u32 val);
+
+/*
+ * SCLPC Module (LPB FIFO)
+ */
+struct mpc512x_lpbfifo {
+	u32	pkt_size;	/* SCLPC Packet Size Register */
+	u32	start_addr;	/* SCLPC Start Address Register */
+	u32	ctrl;		/* SCLPC Control Register */
+	u32	enable;		/* SCLPC Enable Register */
+	u32	reserved1;
+	u32	status;		/* SCLPC Status Register */
+	u32	bytes_done;	/* SCLPC Bytes Done Register */
+	u32	emb_sc;		/* EMB Share Counter Register */
+	u32	emb_pc;		/* EMB Pause Control Register */
+	u32	reserved2[7];
+	u32	data_word;	/* LPC RX/TX FIFO Data Word Register */
+	u32	fifo_status;	/* LPC RX/TX FIFO Status Register */
+	u32	fifo_ctrl;	/* LPC RX/TX FIFO Control Register */
+	u32	fifo_alarm;	/* LPC RX/TX FIFO Alarm Register */
+};
+
+#define MPC512X_SCLPC_START		(1 << 31)
+#define MPC512X_SCLPC_CS(x)		(((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FLUSH		(1 << 17)
+#define MPC512X_SCLPC_READ		(1 << 16)
+#define MPC512X_SCLPC_DAI		(1 << 8)
+#define MPC512X_SCLPC_BPT(x)		((x) & 0x3f)
+#define MPC512X_SCLPC_RESET		(1 << 24)
+#define MPC512X_SCLPC_FIFO_RESET	(1 << 16)
+#define MPC512X_SCLPC_ABORT_INT_ENABLE	(1 << 9)
+#define MPC512X_SCLPC_NORM_INT_ENABLE	(1 << 8)
+#define MPC512X_SCLPC_ENABLE		(1 << 0)
+#define MPC512X_SCLPC_SUCCESS		(1 << 24)
+#define MPC512X_SCLPC_FIFO_CTRL(x)	(((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FIFO_ALARM(x)	((x) & 0x3ff)
+
+enum lpb_dev_portsize {
+	LPB_DEV_PORTSIZE_UNDEFINED = 0,
+	LPB_DEV_PORTSIZE_1_BYTE = 1,
+	LPB_DEV_PORTSIZE_2_BYTES = 2,
+	LPB_DEV_PORTSIZE_4_BYTES = 4,
+	LPB_DEV_PORTSIZE_8_BYTES = 8
+};
+
+enum mpc512x_lpbfifo_req_dir {
+	MPC512X_LPBFIFO_REQ_DIR_READ,
+	MPC512X_LPBFIFO_REQ_DIR_WRITE
+};
+
+struct mpc512x_lpbfifo_request {
+	phys_addr_t dev_phys_addr; /* physical address of some device on LPB */
+	void *ram_virt_addr; /* virtual address of some region in RAM */
+	u32 size;
+	enum lpb_dev_portsize portsize;
+	enum mpc512x_lpbfifo_req_dir dir;
+	void (*callback)(struct mpc512x_lpbfifo_request *);
+};
+
+int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req);
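+
+/* Editor's sketch (guarded out; the buffer, addresses and callback are
+ * hypothetical): requesting a 512-byte read from an LPB device into RAM.
+ */
+#if 0
+	struct mpc512x_lpbfifo_request req = {
+		.dev_phys_addr	= dev_phys,
+		.ram_virt_addr	= buf,
+		.size		= 512,
+		.portsize	= LPB_DEV_PORTSIZE_4_BYTES,
+		.dir		= MPC512X_LPBFIFO_REQ_DIR_READ,
+		.callback	= my_transfer_done,
+	};
+
+	err = mpc512x_lpbfifo_submit(&req);
+#endif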
+
+#endif /* __ASM_POWERPC_MPC5121_H__ */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
new file mode 100644
index 0000000..ce1e0aa
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -0,0 +1,365 @@
+/*
+ * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
+ * May need to be cleaned up as the port goes on ...
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_POWERPC_MPC52xx_H__
+#define __ASM_POWERPC_MPC52xx_H__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/prom.h>
+#include <asm/mpc5xxx.h>
+#endif /* __ASSEMBLY__ */
+
+#include <linux/suspend.h>
+
+/* Variants of the 5200(B) */
+#define MPC5200_SVR		0x80110010
+#define MPC5200_SVR_MASK	0xfffffff0
+#define MPC5200B_SVR		0x80110020
+#define MPC5200B_SVR_MASK	0xfffffff0
+
+/* ======================================================================== */
+/* Structures mapping of some unit register set                             */
+/* ======================================================================== */
+
+#ifndef __ASSEMBLY__
+
+/* Memory Mapping Control */
+struct mpc52xx_mmap_ctl {
+	u32 mbar;		/* MMAP_CTRL + 0x00 */
+
+	u32 cs0_start;		/* MMAP_CTRL + 0x04 */
+	u32 cs0_stop;		/* MMAP_CTRL + 0x08 */
+	u32 cs1_start;		/* MMAP_CTRL + 0x0c */
+	u32 cs1_stop;		/* MMAP_CTRL + 0x10 */
+	u32 cs2_start;		/* MMAP_CTRL + 0x14 */
+	u32 cs2_stop;		/* MMAP_CTRL + 0x18 */
+	u32 cs3_start;		/* MMAP_CTRL + 0x1c */
+	u32 cs3_stop;		/* MMAP_CTRL + 0x20 */
+	u32 cs4_start;		/* MMAP_CTRL + 0x24 */
+	u32 cs4_stop;		/* MMAP_CTRL + 0x28 */
+	u32 cs5_start;		/* MMAP_CTRL + 0x2c */
+	u32 cs5_stop;		/* MMAP_CTRL + 0x30 */
+
+	u32 sdram0;		/* MMAP_CTRL + 0x34 */
+	u32 sdram1;		/* MMAP_CTRL + 0x38 */
+
+	u32 reserved[4];	/* MMAP_CTRL + 0x3c .. 0x48 */
+
+	u32 boot_start;		/* MMAP_CTRL + 0x4c */
+	u32 boot_stop;		/* MMAP_CTRL + 0x50 */
+
+	u32 ipbi_ws_ctrl;	/* MMAP_CTRL + 0x54 */
+
+	u32 cs6_start;		/* MMAP_CTRL + 0x58 */
+	u32 cs6_stop;		/* MMAP_CTRL + 0x5c */
+	u32 cs7_start;		/* MMAP_CTRL + 0x60 */
+	u32 cs7_stop;		/* MMAP_CTRL + 0x64 */
+};
+
+/* SDRAM control */
+struct mpc52xx_sdram {
+	u32 mode;		/* SDRAM + 0x00 */
+	u32 ctrl;		/* SDRAM + 0x04 */
+	u32 config1;		/* SDRAM + 0x08 */
+	u32 config2;		/* SDRAM + 0x0c */
+};
+
+/* SDMA */
+struct mpc52xx_sdma {
+	u32 taskBar;		/* SDMA + 0x00 */
+	u32 currentPointer;	/* SDMA + 0x04 */
+	u32 endPointer;		/* SDMA + 0x08 */
+	u32 variablePointer;	/* SDMA + 0x0c */
+
+	u8 IntVect1;		/* SDMA + 0x10 */
+	u8 IntVect2;		/* SDMA + 0x11 */
+	u16 PtdCntrl;		/* SDMA + 0x12 */
+
+	u32 IntPend;		/* SDMA + 0x14 */
+	u32 IntMask;		/* SDMA + 0x18 */
+
+	u16 tcr[16];		/* SDMA + 0x1c .. 0x3a */
+
+	u8 ipr[32];		/* SDMA + 0x3c .. 0x5b */
+
+	u32 cReqSelect;		/* SDMA + 0x5c */
+	u32 task_size0;		/* SDMA + 0x60 */
+	u32 task_size1;		/* SDMA + 0x64 */
+	u32 MDEDebug;		/* SDMA + 0x68 */
+	u32 ADSDebug;		/* SDMA + 0x6c */
+	u32 Value1;		/* SDMA + 0x70 */
+	u32 Value2;		/* SDMA + 0x74 */
+	u32 Control;		/* SDMA + 0x78 */
+	u32 Status;		/* SDMA + 0x7c */
+	u32 PTDDebug;		/* SDMA + 0x80 */
+};
+
+/* GPT */
+struct mpc52xx_gpt {
+	u32 mode;		/* GPTx + 0x00 */
+	u32 count;		/* GPTx + 0x04 */
+	u32 pwm;		/* GPTx + 0x08 */
+	u32 status;		/* GPTx + 0x0c */
+};
+
+/* GPIO */
+struct mpc52xx_gpio {
+	u32 port_config;	/* GPIO + 0x00 */
+	u32 simple_gpioe;	/* GPIO + 0x04 */
+	u32 simple_ode;		/* GPIO + 0x08 */
+	u32 simple_ddr;		/* GPIO + 0x0c */
+	u32 simple_dvo;		/* GPIO + 0x10 */
+	u32 simple_ival;	/* GPIO + 0x14 */
+	u8 outo_gpioe;		/* GPIO + 0x18 */
+	u8 reserved1[3];	/* GPIO + 0x19 */
+	u8 outo_dvo;		/* GPIO + 0x1c */
+	u8 reserved2[3];	/* GPIO + 0x1d */
+	u8 sint_gpioe;		/* GPIO + 0x20 */
+	u8 reserved3[3];	/* GPIO + 0x21 */
+	u8 sint_ode;		/* GPIO + 0x24 */
+	u8 reserved4[3];	/* GPIO + 0x25 */
+	u8 sint_ddr;		/* GPIO + 0x28 */
+	u8 reserved5[3];	/* GPIO + 0x29 */
+	u8 sint_dvo;		/* GPIO + 0x2c */
+	u8 reserved6[3];	/* GPIO + 0x2d */
+	u8 sint_inten;		/* GPIO + 0x30 */
+	u8 reserved7[3];	/* GPIO + 0x31 */
+	u16 sint_itype;		/* GPIO + 0x34 */
+	u16 reserved8;		/* GPIO + 0x36 */
+	u8 gpio_control;	/* GPIO + 0x38 */
+	u8 reserved9[3];	/* GPIO + 0x39 */
+	u8 sint_istat;		/* GPIO + 0x3c */
+	u8 sint_ival;		/* GPIO + 0x3d */
+	u8 bus_errs;		/* GPIO + 0x3e */
+	u8 reserved10;		/* GPIO + 0x3f */
+};
+
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD	4
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD	5
+#define MPC52xx_GPIO_PCI_DIS			(1<<15)
+
+/* GPIO with WakeUp*/
+struct mpc52xx_gpio_wkup {
+	u8 wkup_gpioe;		/* GPIO_WKUP + 0x00 */
+	u8 reserved1[3];	/* GPIO_WKUP + 0x03 */
+	u8 wkup_ode;		/* GPIO_WKUP + 0x04 */
+	u8 reserved2[3];	/* GPIO_WKUP + 0x05 */
+	u8 wkup_ddr;		/* GPIO_WKUP + 0x08 */
+	u8 reserved3[3];	/* GPIO_WKUP + 0x09 */
+	u8 wkup_dvo;		/* GPIO_WKUP + 0x0C */
+	u8 reserved4[3];	/* GPIO_WKUP + 0x0D */
+	u8 wkup_inten;		/* GPIO_WKUP + 0x10 */
+	u8 reserved5[3];	/* GPIO_WKUP + 0x11 */
+	u8 wkup_iinten;		/* GPIO_WKUP + 0x14 */
+	u8 reserved6[3];	/* GPIO_WKUP + 0x15 */
+	u16 wkup_itype;		/* GPIO_WKUP + 0x18 */
+	u8 reserved7[2];	/* GPIO_WKUP + 0x1A */
+	u8 wkup_maste;		/* GPIO_WKUP + 0x1C */
+	u8 reserved8[3];	/* GPIO_WKUP + 0x1D */
+	u8 wkup_ival;		/* GPIO_WKUP + 0x20 */
+	u8 reserved9[3];	/* GPIO_WKUP + 0x21 */
+	u8 wkup_istat;		/* GPIO_WKUP + 0x24 */
+	u8 reserved10[3];	/* GPIO_WKUP + 0x25 */
+};
+
+/* XLB Bus control */
+struct mpc52xx_xlb {
+	u8 reserved[0x40];
+	u32 config;		/* XLB + 0x40 */
+	u32 version;		/* XLB + 0x44 */
+	u32 status;		/* XLB + 0x48 */
+	u32 int_enable;		/* XLB + 0x4c */
+	u32 addr_capture;	/* XLB + 0x50 */
+	u32 bus_sig_capture;	/* XLB + 0x54 */
+	u32 addr_timeout;	/* XLB + 0x58 */
+	u32 data_timeout;	/* XLB + 0x5c */
+	u32 bus_act_timeout;	/* XLB + 0x60 */
+	u32 master_pri_enable;	/* XLB + 0x64 */
+	u32 master_priority;	/* XLB + 0x68 */
+	u32 base_address;	/* XLB + 0x6c */
+	u32 snoop_window;	/* XLB + 0x70 */
+};
+
+#define MPC52xx_XLB_CFG_PLDIS		(1 << 31)
+#define MPC52xx_XLB_CFG_SNOOP		(1 << 15)
+
+/* Clock Distribution control */
+struct mpc52xx_cdm {
+	u32 jtag_id;		/* CDM + 0x00  reg0 read only */
+	u32 rstcfg;		/* CDM + 0x04  reg1 read only */
+	u32 breadcrumb;		/* CDM + 0x08  reg2 */
+
+	u8 mem_clk_sel;		/* CDM + 0x0c  reg3 byte0 */
+	u8 xlb_clk_sel;		/* CDM + 0x0d  reg3 byte1 read only */
+	u8 ipb_clk_sel;		/* CDM + 0x0e  reg3 byte2 */
+	u8 pci_clk_sel;		/* CDM + 0x0f  reg3 byte3 */
+
+	u8 ext_48mhz_en;	/* CDM + 0x10  reg4 byte0 */
+	u8 fd_enable;		/* CDM + 0x11  reg4 byte1 */
+	u16 fd_counters;	/* CDM + 0x12  reg4 byte2,3 */
+
+	u32 clk_enables;	/* CDM + 0x14  reg5 */
+
+	u8 osc_disable;		/* CDM + 0x18  reg6 byte0 */
+	u8 reserved0[3];	/* CDM + 0x19  reg6 byte1,2,3 */
+
+	u8 ccs_sleep_enable;	/* CDM + 0x1c  reg7 byte0 */
+	u8 osc_sleep_enable;	/* CDM + 0x1d  reg7 byte1 */
+	u8 reserved1;		/* CDM + 0x1e  reg7 byte2 */
+	u8 ccs_qreq_test;	/* CDM + 0x1f  reg7 byte3 */
+
+	u8 soft_reset;		/* CDM + 0x20  u8 byte0 */
+	u8 no_ckstp;		/* CDM + 0x21  u8 byte0 */
+	u8 reserved2[2];	/* CDM + 0x22  u8 byte1,2,3 */
+
+	u8 pll_lock;		/* CDM + 0x24  reg9 byte0 */
+	u8 pll_looselock;	/* CDM + 0x25  reg9 byte1 */
+	u8 pll_sm_lockwin;	/* CDM + 0x26  reg9 byte2 */
+	u8 reserved3;		/* CDM + 0x27  reg9 byte3 */
+
+	u16 reserved4;		/* CDM + 0x28  reg10 byte0,1 */
+	u16 mclken_div_psc1;	/* CDM + 0x2a  reg10 byte2,3 */
+
+	u16 reserved5;		/* CDM + 0x2c  reg11 byte0,1 */
+	u16 mclken_div_psc2;	/* CDM + 0x2e  reg11 byte2,3 */
+
+	u16 reserved6;		/* CDM + 0x30  reg12 byte0,1 */
+	u16 mclken_div_psc3;	/* CDM + 0x32  reg12 byte2,3 */
+
+	u16 reserved7;		/* CDM + 0x34  reg13 byte0,1 */
+	u16 mclken_div_psc6;	/* CDM + 0x36  reg13 byte2,3 */
+};
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+	u32 per_mask;		/* INTR + 0x00 */
+	u32 per_pri1;		/* INTR + 0x04 */
+	u32 per_pri2;		/* INTR + 0x08 */
+	u32 per_pri3;		/* INTR + 0x0c */
+	u32 ctrl;		/* INTR + 0x10 */
+	u32 main_mask;		/* INTR + 0x14 */
+	u32 main_pri1;		/* INTR + 0x18 */
+	u32 main_pri2;		/* INTR + 0x1c */
+	u32 reserved1;		/* INTR + 0x20 */
+	u32 enc_status;		/* INTR + 0x24 */
+	u32 crit_status;	/* INTR + 0x28 */
+	u32 main_status;	/* INTR + 0x2c */
+	u32 per_status;		/* INTR + 0x30 */
+	u32 reserved2;		/* INTR + 0x34 */
+	u32 per_error;		/* INTR + 0x38 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/* ========================================================================= */
+/* Prototypes for MPC52xx sysdev                                             */
+/* ========================================================================= */
+
+#ifndef __ASSEMBLY__
+
+/* mpc52xx_common.c */
+extern void mpc5200_setup_xlb_arbiter(void);
+extern void mpc52xx_declare_of_platform_devices(void);
+extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
+extern void mpc52xx_map_common_devices(void);
+extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
+extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
+extern void __noreturn mpc52xx_restart(char *cmd);
+
+/* mpc52xx_gpt.c */
+struct mpc52xx_gpt_priv;
+extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
+extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+                            int continuous);
+extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
+extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
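+
+/* Editor's sketch (guarded out; the irq and period values are hypothetical,
+ * period units as interpreted by the gpt driver): resolving a GPT from its
+ * interrupt and arming a one-shot timer.
+ */
+#if 0
+	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(virq);
+
+	if (gpt)
+		mpc52xx_gpt_start_timer(gpt, period, 0 /* one shot */);
+#endif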
+
+/* mpc52xx_lpbfifo.c */
+#define MPC52XX_LPBFIFO_FLAG_READ		(0)
+#define MPC52XX_LPBFIFO_FLAG_WRITE		(1<<0)
+#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT	(1<<1)
+#define MPC52XX_LPBFIFO_FLAG_NO_DMA		(1<<2)
+#define MPC52XX_LPBFIFO_FLAG_POLL_DMA		(1<<3)
+
+struct mpc52xx_lpbfifo_request {
+	struct list_head list;
+
+	/* localplus bus address */
+	unsigned int cs;
+	size_t offset;
+
+	/* Memory address */
+	void *data;
+	phys_addr_t data_phys;
+
+	/* Details of transfer */
+	size_t size;
+	size_t pos;	/* current position of transfer */
+	int flags;
+	int defer_xfer_start;
+
+	/* What to do when finished */
+	void (*callback)(struct mpc52xx_lpbfifo_request *);
+
+	void *priv;		/* Driver private data */
+
+	/* statistics */
+	int irq_count;
+	int irq_ticks;
+	u8 last_byte;
+	int buffer_not_done_cnt;
+};
+
+extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_poll(void);
+extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
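+
+/* Editor's sketch (guarded out; names hypothetical): a read of 'len' bytes
+ * from chip select 0 through the LPB FIFO.
+ */
+#if 0
+	struct mpc52xx_lpbfifo_request req = {
+		.cs		= 0,
+		.offset		= 0,
+		.data		= buf,
+		.size		= len,
+		.flags		= MPC52XX_LPBFIFO_FLAG_READ,
+		.callback	= transfer_done,
+	};
+
+	rc = mpc52xx_lpbfifo_submit(&req);
+#endif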
+
+/* mpc52xx_pic.c */
+extern void mpc52xx_init_irq(void);
+extern unsigned int mpc52xx_get_irq(void);
+
+/* mpc52xx_pci.c */
+#ifdef CONFIG_PCI
+extern int __init mpc52xx_add_bridge(struct device_node *node);
+extern void __init mpc52xx_setup_pci(void);
+#else
+static inline void mpc52xx_setup_pci(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PM
+struct mpc52xx_suspend {
+	void (*board_suspend_prepare)(void __iomem *mbar);
+	void (*board_resume_finish)(void __iomem *mbar);
+};
+
+extern struct mpc52xx_suspend mpc52xx_suspend;
+extern int __init mpc52xx_pm_init(void);
+extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
+
+/* lite5200 calls mpc5200 suspend functions, so here they are */
+extern int mpc52xx_pm_prepare(void);
+extern int mpc52xx_pm_enter(suspend_state_t);
+extern void mpc52xx_pm_finish(void);
+extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
+
+#ifdef CONFIG_PPC_LITE5200
+int __init lite5200_pm_init(void);
+#endif
+#endif /* CONFIG_PM */
+
+#endif /* __ASM_POWERPC_MPC52xx_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
new file mode 100644
index 0000000..ec995b2
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -0,0 +1,352 @@
+/*
+ * include/asm-ppc/mpc52xx_psc.h
+ *
+ * Definitions of consts/structs to drive the Freescale MPC52xx OnChip
+ * PSCs. These are shared between multiple drivers since a PSC can be a
+ * UART, AC97, IR, I2S, ... So this header is in asm-ppc.
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Based on / extracted from a 2.4 kernel header originally written by
+ * Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_MPC52xx_PSC_H__
+#define __ASM_MPC52xx_PSC_H__
+
+#include <asm/types.h>
+
+/* Max number of PSCs */
+#ifdef CONFIG_PPC_MPC512x
+#define MPC52xx_PSC_MAXNUM     12
+#else
+#define MPC52xx_PSC_MAXNUM	6
+#endif
+
+/* Programmable Serial Controller (PSC) status register bits */
+#define MPC52xx_PSC_SR_UNEX_RX	0x0001
+#define MPC52xx_PSC_SR_DATA_VAL	0x0002
+#define MPC52xx_PSC_SR_DATA_OVR	0x0004
+#define MPC52xx_PSC_SR_CMDSEND	0x0008
+#define MPC52xx_PSC_SR_CDE	0x0080
+#define MPC52xx_PSC_SR_RXRDY	0x0100
+#define MPC52xx_PSC_SR_RXFULL	0x0200
+#define MPC52xx_PSC_SR_TXRDY	0x0400
+#define MPC52xx_PSC_SR_TXEMP	0x0800
+#define MPC52xx_PSC_SR_OE	0x1000
+#define MPC52xx_PSC_SR_PE	0x2000
+#define MPC52xx_PSC_SR_FE	0x4000
+#define MPC52xx_PSC_SR_RB	0x8000
+
+/* PSC Command values */
+#define MPC52xx_PSC_RX_ENABLE		0x0001
+#define MPC52xx_PSC_RX_DISABLE		0x0002
+#define MPC52xx_PSC_TX_ENABLE		0x0004
+#define MPC52xx_PSC_TX_DISABLE		0x0008
+#define MPC52xx_PSC_SEL_MODE_REG_1	0x0010
+#define MPC52xx_PSC_RST_RX		0x0020
+#define MPC52xx_PSC_RST_TX		0x0030
+#define MPC52xx_PSC_RST_ERR_STAT	0x0040
+#define MPC52xx_PSC_RST_BRK_CHG_INT	0x0050
+#define MPC52xx_PSC_START_BRK		0x0060
+#define MPC52xx_PSC_STOP_BRK		0x0070
+
+/* PSC TxRx FIFO status bits */
+#define MPC52xx_PSC_RXTX_FIFO_ERR	0x0040
+#define MPC52xx_PSC_RXTX_FIFO_UF	0x0020
+#define MPC52xx_PSC_RXTX_FIFO_OF	0x0010
+#define MPC52xx_PSC_RXTX_FIFO_FR	0x0008
+#define MPC52xx_PSC_RXTX_FIFO_FULL	0x0004
+#define MPC52xx_PSC_RXTX_FIFO_ALARM	0x0002
+#define MPC52xx_PSC_RXTX_FIFO_EMPTY	0x0001
+
+/* PSC interrupt status/mask bits */
+#define MPC52xx_PSC_IMR_UNEX_RX_SLOT 0x0001
+#define MPC52xx_PSC_IMR_DATA_VALID	0x0002
+#define MPC52xx_PSC_IMR_DATA_OVR	0x0004
+#define MPC52xx_PSC_IMR_CMD_SEND	0x0008
+#define MPC52xx_PSC_IMR_ERROR		0x0040
+#define MPC52xx_PSC_IMR_DEOF		0x0080
+#define MPC52xx_PSC_IMR_TXRDY		0x0100
+#define MPC52xx_PSC_IMR_RXRDY		0x0200
+#define MPC52xx_PSC_IMR_DB		0x0400
+#define MPC52xx_PSC_IMR_TXEMP		0x0800
+#define MPC52xx_PSC_IMR_ORERR		0x1000
+#define MPC52xx_PSC_IMR_IPC		0x8000
+
+/* PSC input port change bits */
+#define MPC52xx_PSC_CTS			0x01
+#define MPC52xx_PSC_DCD			0x02
+#define MPC52xx_PSC_D_CTS		0x10
+#define MPC52xx_PSC_D_DCD		0x20
+
+/* PSC acr bits */
+#define MPC52xx_PSC_IEC_CTS		0x01
+#define MPC52xx_PSC_IEC_DCD		0x02
+
+/* PSC output port bits */
+#define MPC52xx_PSC_OP_RTS		0x01
+#define MPC52xx_PSC_OP_RES		0x02
+
+/* PSC mode fields */
+#define MPC52xx_PSC_MODE_5_BITS			0x00
+#define MPC52xx_PSC_MODE_6_BITS			0x01
+#define MPC52xx_PSC_MODE_7_BITS			0x02
+#define MPC52xx_PSC_MODE_8_BITS			0x03
+#define MPC52xx_PSC_MODE_BITS_MASK		0x03
+#define MPC52xx_PSC_MODE_PAREVEN		0x00
+#define MPC52xx_PSC_MODE_PARODD			0x04
+#define MPC52xx_PSC_MODE_PARFORCE		0x08
+#define MPC52xx_PSC_MODE_PARNONE		0x10
+#define MPC52xx_PSC_MODE_ERR			0x20
+#define MPC52xx_PSC_MODE_FFULL			0x40
+#define MPC52xx_PSC_MODE_RXRTS			0x80
+
+#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS	0x00
+#define MPC52xx_PSC_MODE_ONE_STOP		0x07
+#define MPC52xx_PSC_MODE_TWO_STOP		0x0f
+#define MPC52xx_PSC_MODE_TXCTS			0x10
+
+#define MPC52xx_PSC_RFNUM_MASK	0x01ff
+
+#define MPC52xx_PSC_SICR_DTS1			(1 << 29)
+#define MPC52xx_PSC_SICR_SHDR			(1 << 28)
+#define MPC52xx_PSC_SICR_SIM_MASK		(0xf << 24)
+#define MPC52xx_PSC_SICR_SIM_UART		(0x0 << 24)
+#define MPC52xx_PSC_SICR_SIM_UART_DCD		(0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_8		(0x1 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_16		(0x2 << 24)
+#define MPC52xx_PSC_SICR_SIM_AC97		(0x3 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR		(0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR_DCD		(0xc << 24)
+#define MPC52xx_PSC_SICR_SIM_MIR		(0x5 << 24)
+#define MPC52xx_PSC_SICR_SIM_FIR		(0x6 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_24		(0x7 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_32		(0xf << 24)
+#define MPC52xx_PSC_SICR_ACRB			(0x8 << 24)
+#define MPC52xx_PSC_SICR_AWR			(1 << 30)
+#define MPC52xx_PSC_SICR_GENCLK			(1 << 23)
+#define MPC52xx_PSC_SICR_I2S			(1 << 22)
+#define MPC52xx_PSC_SICR_CLKPOL			(1 << 21)
+#define MPC52xx_PSC_SICR_SYNCPOL		(1 << 20)
+#define MPC52xx_PSC_SICR_CELLSLAVE		(1 << 19)
+#define MPC52xx_PSC_SICR_CELL2XCLK		(1 << 18)
+#define MPC52xx_PSC_SICR_ESAI			(1 << 17)
+#define MPC52xx_PSC_SICR_ENAC97			(1 << 16)
+#define MPC52xx_PSC_SICR_SPI			(1 << 15)
+#define MPC52xx_PSC_SICR_MSTR			(1 << 14)
+#define MPC52xx_PSC_SICR_CPOL			(1 << 13)
+#define MPC52xx_PSC_SICR_CPHA			(1 << 12)
+#define MPC52xx_PSC_SICR_USEEOF			(1 << 11)
+#define MPC52xx_PSC_SICR_DISABLEEOF		(1 << 10)
+
+/* Structure of the hardware registers */
+struct mpc52xx_psc {
+	union {
+		u8	mode;		/* PSC + 0x00 */
+		u8	mr2;
+	};
+	u8		reserved0[3];
+	union {				/* PSC + 0x04 */
+		u16	status;
+		u16	clock_select;
+	} sr_csr;
+#define mpc52xx_psc_status	sr_csr.status
+#define mpc52xx_psc_clock_select sr_csr.clock_select
+	u16		reserved1;
+	u8		command;	/* PSC + 0x08 */
+	u8		reserved2[3];
+	union {				/* PSC + 0x0c */
+		u8	buffer_8;
+		u16	buffer_16;
+		u32	buffer_32;
+	} buffer;
+#define mpc52xx_psc_buffer_8	buffer.buffer_8
+#define mpc52xx_psc_buffer_16	buffer.buffer_16
+#define mpc52xx_psc_buffer_32	buffer.buffer_32
+	union {				/* PSC + 0x10 */
+		u8	ipcr;
+		u8	acr;
+	} ipcr_acr;
+#define mpc52xx_psc_ipcr	ipcr_acr.ipcr
+#define mpc52xx_psc_acr		ipcr_acr.acr
+	u8		reserved3[3];
+	union {				/* PSC + 0x14 */
+		u16	isr;
+		u16	imr;
+	} isr_imr;
+#define mpc52xx_psc_isr		isr_imr.isr
+#define mpc52xx_psc_imr		isr_imr.imr
+	u16		reserved4;
+	u8		ctur;		/* PSC + 0x18 */
+	u8		reserved5[3];
+	u8		ctlr;		/* PSC + 0x1c */
+	u8		reserved6[3];
+	/* BitClkDiv field of CCR is byte swapped in
+	 * the hardware for mpc5200/b compatibility */
+	u32		ccr;		/* PSC + 0x20 */
+	u32		ac97_slots;	/* PSC + 0x24 */
+	u32		ac97_cmd;	/* PSC + 0x28 */
+	u32		ac97_data;	/* PSC + 0x2c */
+	u8		ivr;		/* PSC + 0x30 */
+	u8		reserved8[3];
+	u8		ip;		/* PSC + 0x34 */
+	u8		reserved9[3];
+	u8		op1;		/* PSC + 0x38 */
+	u8		reserved10[3];
+	u8		op0;		/* PSC + 0x3c */
+	u8		reserved11[3];
+	u32		sicr;		/* PSC + 0x40 */
+	u8		ircr1;		/* PSC + 0x44 */
+	u8		reserved13[3];
+	u8		ircr2;		/* PSC + 0x48 */
+	u8		reserved14[3];
+	u8		irsdr;		/* PSC + 0x4c */
+	u8		reserved15[3];
+	u8		irmdr;		/* PSC + 0x50 */
+	u8		reserved16[3];
+	u8		irfdr;		/* PSC + 0x54 */
+	u8		reserved17[3];
+};
+
+struct mpc52xx_psc_fifo {
+	u16		rfnum;		/* PSC + 0x58 */
+	u16		reserved18;
+	u16		tfnum;		/* PSC + 0x5c */
+	u16		reserved19;
+	u32		rfdata;		/* PSC + 0x60 */
+	u16		rfstat;		/* PSC + 0x64 */
+	u16		reserved20;
+	u8		rfcntl;		/* PSC + 0x68 */
+	u8		reserved21[5];
+	u16		rfalarm;	/* PSC + 0x6e */
+	u16		reserved22;
+	u16		rfrptr;		/* PSC + 0x72 */
+	u16		reserved23;
+	u16		rfwptr;		/* PSC + 0x76 */
+	u16		reserved24;
+	u16		rflrfptr;	/* PSC + 0x7a */
+	u16		reserved25;
+	u16		rflwfptr;	/* PSC + 0x7e */
+	u32		tfdata;		/* PSC + 0x80 */
+	u16		tfstat;		/* PSC + 0x84 */
+	u16		reserved26;
+	u8		tfcntl;		/* PSC + 0x88 */
+	u8		reserved27[5];
+	u16		tfalarm;	/* PSC + 0x8e */
+	u16		reserved28;
+	u16		tfrptr;		/* PSC + 0x92 */
+	u16		reserved29;
+	u16		tfwptr;		/* PSC + 0x96 */
+	u16		reserved30;
+	u16		tflrfptr;	/* PSC + 0x9a */
+	u16		reserved31;
+	u16		tflwfptr;	/* PSC + 0x9e */
+};
+
+#define MPC512x_PSC_FIFO_EOF		0x100
+#define MPC512x_PSC_FIFO_RESET_SLICE	0x80
+#define MPC512x_PSC_FIFO_ENABLE_SLICE	0x01
+#define MPC512x_PSC_FIFO_ENABLE_DMA	0x04
+
+#define MPC512x_PSC_FIFO_EMPTY		0x1
+#define MPC512x_PSC_FIFO_FULL		0x2
+#define MPC512x_PSC_FIFO_ALARM		0x4
+#define MPC512x_PSC_FIFO_URERR		0x8
+
+struct mpc512x_psc_fifo {
+	u32		reserved1[10];
+	u32		txcmd;		/* PSC + 0x80 */
+	u32		txalarm;	/* PSC + 0x84 */
+	u32		txsr;		/* PSC + 0x88 */
+	u32		txisr;		/* PSC + 0x8c */
+	u32		tximr;		/* PSC + 0x90 */
+	u32		txcnt;		/* PSC + 0x94 */
+	u32		txptr;		/* PSC + 0x98 */
+	u32		txsz;		/* PSC + 0x9c */
+	u32		reserved2[7];
+	union {
+		u8	txdata_8;
+		u16	txdata_16;
+		u32	txdata_32;
+	} txdata; 			/* PSC + 0xbc */
+#define txdata_8 txdata.txdata_8
+#define txdata_16 txdata.txdata_16
+#define txdata_32 txdata.txdata_32
+	u32		rxcmd;		/* PSC + 0xc0 */
+	u32		rxalarm;	/* PSC + 0xc4 */
+	u32		rxsr;		/* PSC + 0xc8 */
+	u32		rxisr;		/* PSC + 0xcc */
+	u32		rximr;		/* PSC + 0xd0 */
+	u32		rxcnt;		/* PSC + 0xd4 */
+	u32		rxptr;		/* PSC + 0xd8 */
+	u32		rxsz;		/* PSC + 0xdc */
+	u32		reserved3[7];
+	union {
+		u8	rxdata_8;
+		u16	rxdata_16;
+		u32	rxdata_32;
+	} rxdata; 			/* PSC + 0xfc */
+#define rxdata_8 rxdata.rxdata_8
+#define rxdata_16 rxdata.rxdata_16
+#define rxdata_32 rxdata.rxdata_32
+};
+
+struct mpc5125_psc {
+	u8		mr1;			/* PSC + 0x00 */
+	u8		reserved0[3];
+	u8		mr2;			/* PSC + 0x04 */
+	u8		reserved1[3];
+	struct {
+		u16		status;		/* PSC + 0x08 */
+		u8		reserved2[2];
+		u8		clock_select;	/* PSC + 0x0c */
+		u8		reserved3[3];
+	} sr_csr;
+	u8		command;		/* PSC + 0x10 */
+	u8		reserved4[3];
+	union {					/* PSC + 0x14 */
+		u8		buffer_8;
+		u16		buffer_16;
+		u32		buffer_32;
+	} buffer;
+	struct {
+		u8		ipcr;		/* PSC + 0x18 */
+		u8		reserved5[3];
+		u8		acr;		/* PSC + 0x1c */
+		u8		reserved6[3];
+	} ipcr_acr;
+	struct {
+		u16		isr;		/* PSC + 0x20 */
+		u8		reserved7[2];
+		u16		imr;		/* PSC + 0x24 */
+		u8		reserved8[2];
+	} isr_imr;
+	u8		ctur;			/* PSC + 0x28 */
+	u8		reserved9[3];
+	u8		ctlr;			/* PSC + 0x2c */
+	u8		reserved10[3];
+	u32		ccr;			/* PSC + 0x30 */
+	u32		ac97slots;		/* PSC + 0x34 */
+	u32		ac97cmd;		/* PSC + 0x38 */
+	u32		ac97data;		/* PSC + 0x3c */
+	u8		reserved11[4];
+	u8		ip;			/* PSC + 0x44 */
+	u8		reserved12[3];
+	u8		op1;			/* PSC + 0x48 */
+	u8		reserved13[3];
+	u8		op0;			/* PSC + 0x4c */
+	u8		reserved14[3];
+	u32		sicr;			/* PSC + 0x50 */
+	u8		reserved15[4];	/* make eq. sizeof(mpc52xx_psc) */
+};
+
+#endif  /* __ASM_MPC52xx_PSC_H__ */
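
A brief usage sketch (not part of the patch): the register struct and the
MPC52xx_PSC_SR_* bits above combine like this when polling a PSC.  The psc
pointer is assumed to come from an ioremap() of the PSC register block.

#include <asm/io.h>
#include <asm/mpc52xx_psc.h>

/* Drain pending receive data by polling the status register. */
static void psc_drain_rx(struct mpc52xx_psc __iomem *psc)
{
	while (in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY)
		(void)in_8(&psc->mpc52xx_psc_buffer_8);
}
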
diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h
new file mode 100644
index 0000000..5ce9c5f
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc5xxx.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007
+ *
+ * Description:
+ * MPC5xxx Prototypes and definitions
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_MPC5xxx_H__
+#define __ASM_POWERPC_MPC5xxx_H__
+
+extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node);
+
+#endif /* __ASM_POWERPC_MPC5xxx_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc6xx.h b/arch/powerpc/include/asm/mpc6xx.h
new file mode 100644
index 0000000..6ed9f4c
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc6xx.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_MPC6xx_H
+#define __ASM_POWERPC_MPC6xx_H
+
+void mpc6xx_enter_standby(void);
+
+#endif
diff --git a/arch/powerpc/include/asm/mpc8260.h b/arch/powerpc/include/asm/mpc8260.h
new file mode 100644
index 0000000..fd8c570
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc8260.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Since there are many different boards and no standard configuration,
+ * we have a unique include file for each.  Rather than change every
+ * file that has to include MPC8260 configuration, they all include
+ * this one and the configuration switching is done here.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_MPC8260_H__
+#define __ASM_POWERPC_MPC8260_H__
+
+#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */
+
+#ifdef CONFIG_8260
+
+#if defined(CONFIG_PQ2ADS) || defined (CONFIG_PQ2FADS)
+#include <platforms/82xx/pq2ads.h>
+#endif
+
+#ifdef CONFIG_PCI_8260
+#include <platforms/82xx/m82xx_pci.h>
+#endif
+
+#endif /* CONFIG_8260 */
+#endif /* !__ASM_POWERPC_MPC8260_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 0000000..213f3a8
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,95 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_PPC_MPC85XX_H
+#define __ASM_PPC_MPC85XX_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SOC design revision */
+#define SVR_MAJ(svr)	(((svr) >>  4) & 0xF)	/* Major revision field*/
+#define SVR_MIN(svr)	(((svr) >>  0) & 0xF)	/* Minor revision field*/
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF)	/* SOC Version fields */
+
+#define SVR_8533	0x803400
+#define SVR_8535	0x803701
+#define SVR_8536	0x803700
+#define SVR_8540	0x803000
+#define SVR_8541	0x807200
+#define SVR_8543	0x803200
+#define SVR_8544	0x803401
+#define SVR_8545	0x803102
+#define SVR_8547	0x803101
+#define SVR_8548	0x803100
+#define SVR_8555	0x807100
+#define SVR_8560	0x807000
+#define SVR_8567	0x807501
+#define SVR_8568	0x807500
+#define SVR_8569	0x808000
+#define SVR_8572	0x80E000
+#define SVR_P1010	0x80F100
+#define SVR_P1011	0x80E500
+#define SVR_P1012	0x80E501
+#define SVR_P1013	0x80E700
+#define SVR_P1014	0x80F101
+#define SVR_P1017	0x80F700
+#define SVR_P1020	0x80E400
+#define SVR_P1021	0x80E401
+#define SVR_P1022	0x80E600
+#define SVR_P1023	0x80F600
+#define SVR_P1024	0x80E402
+#define SVR_P1025	0x80E403
+#define SVR_P2010	0x80E300
+#define SVR_P2020	0x80E200
+#define SVR_P2040	0x821000
+#define SVR_P2041	0x821001
+#define SVR_P3041	0x821103
+#define SVR_P4040	0x820100
+#define SVR_P4080	0x820000
+#define SVR_P5010	0x822100
+#define SVR_P5020	0x822000
+#define SVR_P5021	0x820500
+#define SVR_P5040	0x820400
+#define SVR_T4240	0x824000
+#define SVR_T4120	0x824001
+#define SVR_T4160	0x824100
+#define SVR_T4080	0x824102
+#define SVR_C291	0x850000
+#define SVR_C292	0x850020
+#define SVR_C293	0x850030
+#define SVR_B4860	0x868000
+#define SVR_G4860	0x868001
+#define SVR_G4060	0x868003
+#define SVR_B4440	0x868100
+#define SVR_G4440	0x868101
+#define SVR_B4420	0x868102
+#define SVR_B4220	0x868103
+#define SVR_T1040	0x852000
+#define SVR_T1041	0x852001
+#define SVR_T1042	0x852002
+#define SVR_T1020	0x852100
+#define SVR_T1021	0x852101
+#define SVR_T1022	0x852102
+#define SVR_T2080	0x853000
+#define SVR_T2081	0x853100
+
+#define SVR_8610	0x80A000
+#define SVR_8641	0x809000
+#define SVR_8641D	0x809001
+
+#define SVR_9130	0x860001
+#define SVR_9131	0x860000
+#define SVR_9132	0x861000
+#define SVR_9232	0x861400
+
+#define SVR_Unknown	0xFFFFFF
+
+#endif
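
A short sketch (not part of the patch) of how these macros compose: read
the SVR, compare the SOC version field, then check the revision.  The
is_p2020_rev2() name is invented for illustration.

#include <linux/types.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>

static bool is_p2020_rev2(void)
{
	u32 svr = mfspr(SPRN_SVR);

	/* match on the SOC version field, then on the major revision */
	return SVR_SOC_VER(svr) == SVR_P2020 && SVR_MAJ(svr) == 2;
}
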
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
new file mode 100644
index 0000000..0abf2e7
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic.h
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+#include <asm/dcr.h>
+#include <asm/msi_bitmap.h>
+
+/*
+ * Global registers
+ */
+
+#define MPIC_GREG_BASE			0x01000
+
+#define MPIC_GREG_FEATURE_0		0x00000
+#define		MPIC_GREG_FEATURE_LAST_SRC_MASK		0x07ff0000
+#define		MPIC_GREG_FEATURE_LAST_SRC_SHIFT	16
+#define		MPIC_GREG_FEATURE_LAST_CPU_MASK		0x00001f00
+#define		MPIC_GREG_FEATURE_LAST_CPU_SHIFT	8
+#define		MPIC_GREG_FEATURE_VERSION_MASK		0xff
+#define MPIC_GREG_FEATURE_1		0x00010
+#define MPIC_GREG_GLOBAL_CONF_0		0x00020
+#define		MPIC_GREG_GCONF_RESET			0x80000000
+/* On the FSL mpic implementations the Mode field is expanded to be
+ * 2 bits wide:
+ *	0b00 = pass through (interrupts routed to IRQ0)
+ *	0b01 = Mixed mode
+ *	0b10 = reserved
+ *	0b11 = External proxy / coreint
+ */
+#define		MPIC_GREG_GCONF_COREINT			0x60000000
+#define		MPIC_GREG_GCONF_8259_PTHROU_DIS		0x20000000
+#define		MPIC_GREG_GCONF_NO_BIAS			0x10000000
+#define		MPIC_GREG_GCONF_BASE_MASK		0x000fffff
+#define		MPIC_GREG_GCONF_MCK			0x08000000
+#define MPIC_GREG_GLOBAL_CONF_1		0x00030
+#define MPIC_GREG_VENDOR_0		0x00040
+#define MPIC_GREG_VENDOR_1		0x00050
+#define MPIC_GREG_VENDOR_2		0x00060
+#define MPIC_GREG_VENDOR_3		0x00070
+#define MPIC_GREG_VENDOR_ID		0x00080
+#define 	MPIC_GREG_VENDOR_ID_STEPPING_MASK	0x00ff0000
+#define 	MPIC_GREG_VENDOR_ID_STEPPING_SHIFT	16
+#define 	MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK	0x0000ff00
+#define 	MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT	8
+#define 	MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK	0x000000ff
+#define MPIC_GREG_PROCESSOR_INIT	0x00090
+#define MPIC_GREG_IPI_VECTOR_PRI_0	0x000a0
+#define MPIC_GREG_IPI_VECTOR_PRI_1	0x000b0
+#define MPIC_GREG_IPI_VECTOR_PRI_2	0x000c0
+#define MPIC_GREG_IPI_VECTOR_PRI_3	0x000d0
+#define MPIC_GREG_IPI_STRIDE		0x10
+#define MPIC_GREG_SPURIOUS		0x000e0
+#define MPIC_GREG_TIMER_FREQ		0x000f0
+
+/*
+ * Timer registers
+ */
+#define MPIC_TIMER_BASE			0x01100
+#define MPIC_TIMER_STRIDE		0x40
+#define MPIC_TIMER_GROUP_STRIDE		0x1000
+
+#define MPIC_TIMER_CURRENT_CNT		0x00000
+#define MPIC_TIMER_BASE_CNT		0x00010
+#define MPIC_TIMER_VECTOR_PRI		0x00020
+#define MPIC_TIMER_DESTINATION		0x00030
+
+/*
+ * Per-Processor registers
+ */
+
+#define MPIC_CPU_THISBASE		0x00000
+#define MPIC_CPU_BASE			0x20000
+#define MPIC_CPU_STRIDE			0x01000
+
+#define MPIC_CPU_IPI_DISPATCH_0		0x00040
+#define MPIC_CPU_IPI_DISPATCH_1		0x00050
+#define MPIC_CPU_IPI_DISPATCH_2		0x00060
+#define MPIC_CPU_IPI_DISPATCH_3		0x00070
+#define MPIC_CPU_IPI_DISPATCH_STRIDE	0x00010
+#define MPIC_CPU_CURRENT_TASK_PRI	0x00080
+#define 	MPIC_CPU_TASKPRI_MASK			0x0000000f
+#define MPIC_CPU_WHOAMI			0x00090
+#define 	MPIC_CPU_WHOAMI_MASK			0x0000001f
+#define MPIC_CPU_INTACK			0x000a0
+#define MPIC_CPU_EOI			0x000b0
+#define MPIC_CPU_MCACK			0x000c0
+
+/*
+ * Per-source registers
+ */
+
+#define MPIC_IRQ_BASE			0x10000
+#define MPIC_IRQ_STRIDE			0x00020
+#define MPIC_IRQ_VECTOR_PRI		0x00000
+#define 	MPIC_VECPRI_MASK			0x80000000
+#define 	MPIC_VECPRI_ACTIVITY			0x40000000	/* Read Only */
+#define 	MPIC_VECPRI_PRIORITY_MASK		0x000f0000
+#define 	MPIC_VECPRI_PRIORITY_SHIFT		16
+#define 	MPIC_VECPRI_VECTOR_MASK			0x000007ff
+#define 	MPIC_VECPRI_POLARITY_POSITIVE		0x00800000
+#define 	MPIC_VECPRI_POLARITY_NEGATIVE		0x00000000
+#define 	MPIC_VECPRI_POLARITY_MASK		0x00800000
+#define 	MPIC_VECPRI_SENSE_LEVEL			0x00400000
+#define 	MPIC_VECPRI_SENSE_EDGE			0x00000000
+#define 	MPIC_VECPRI_SENSE_MASK			0x00400000
+#define MPIC_IRQ_DESTINATION		0x00010
+
+#define MPIC_FSL_BRR1			0x00000
+#define 	MPIC_FSL_BRR1_VER			0x0000ffff
+
+#define MPIC_MAX_IRQ_SOURCES	2048
+#define MPIC_MAX_CPUS		32
+#define MPIC_MAX_ISU		32
+
+#define MPIC_MAX_ERR      32
+#define MPIC_FSL_ERR_INT  16
+
+/*
+ * Tsi108 implementation of MPIC has many differences from the original one
+ */
+
+/*
+ * Global registers
+ */
+
+#define TSI108_GREG_BASE		0x00000
+#define TSI108_GREG_FEATURE_0		0x00000
+#define TSI108_GREG_GLOBAL_CONF_0	0x00004
+#define TSI108_GREG_VENDOR_ID		0x0000c
+#define TSI108_GREG_IPI_VECTOR_PRI_0	0x00204		/* Doorbell 0 */
+#define TSI108_GREG_IPI_STRIDE		0x0c
+#define TSI108_GREG_SPURIOUS		0x00010
+#define TSI108_GREG_TIMER_FREQ		0x00014
+
+/*
+ * Timer registers
+ */
+#define TSI108_TIMER_BASE		0x0030
+#define TSI108_TIMER_STRIDE		0x10
+#define TSI108_TIMER_CURRENT_CNT	0x00000
+#define TSI108_TIMER_BASE_CNT		0x00004
+#define TSI108_TIMER_VECTOR_PRI		0x00008
+#define TSI108_TIMER_DESTINATION	0x0000c
+
+/*
+ * Per-Processor registers
+ */
+#define TSI108_CPU_BASE			0x00300
+#define TSI108_CPU_STRIDE		0x00040
+#define TSI108_CPU_IPI_DISPATCH_0	0x00200
+#define TSI108_CPU_IPI_DISPATCH_STRIDE	0x00000
+#define TSI108_CPU_CURRENT_TASK_PRI	0x00000
+#define TSI108_CPU_WHOAMI		0xffffffff
+#define TSI108_CPU_INTACK		0x00004
+#define TSI108_CPU_EOI			0x00008
+#define TSI108_CPU_MCACK		0x00004 /* Doesn't really exist here */
+
+/*
+ * Per-source registers
+ */
+#define TSI108_IRQ_BASE			0x00100
+#define TSI108_IRQ_STRIDE		0x00008
+#define TSI108_IRQ_VECTOR_PRI		0x00000
+#define TSI108_VECPRI_VECTOR_MASK	0x000000ff
+#define TSI108_VECPRI_POLARITY_POSITIVE	0x01000000
+#define TSI108_VECPRI_POLARITY_NEGATIVE	0x00000000
+#define TSI108_VECPRI_SENSE_LEVEL	0x02000000
+#define TSI108_VECPRI_SENSE_EDGE	0x00000000
+#define TSI108_VECPRI_POLARITY_MASK	0x01000000
+#define TSI108_VECPRI_SENSE_MASK	0x02000000
+#define TSI108_IRQ_DESTINATION		0x00004
+
+/* weird mpic register indices and mask bits in the HW info array */
+enum {
+	MPIC_IDX_GREG_BASE = 0,
+	MPIC_IDX_GREG_FEATURE_0,
+	MPIC_IDX_GREG_GLOBAL_CONF_0,
+	MPIC_IDX_GREG_VENDOR_ID,
+	MPIC_IDX_GREG_IPI_VECTOR_PRI_0,
+	MPIC_IDX_GREG_IPI_STRIDE,
+	MPIC_IDX_GREG_SPURIOUS,
+	MPIC_IDX_GREG_TIMER_FREQ,
+
+	MPIC_IDX_TIMER_BASE,
+	MPIC_IDX_TIMER_STRIDE,
+	MPIC_IDX_TIMER_CURRENT_CNT,
+	MPIC_IDX_TIMER_BASE_CNT,
+	MPIC_IDX_TIMER_VECTOR_PRI,
+	MPIC_IDX_TIMER_DESTINATION,
+
+	MPIC_IDX_CPU_BASE,
+	MPIC_IDX_CPU_STRIDE,
+	MPIC_IDX_CPU_IPI_DISPATCH_0,
+	MPIC_IDX_CPU_IPI_DISPATCH_STRIDE,
+	MPIC_IDX_CPU_CURRENT_TASK_PRI,
+	MPIC_IDX_CPU_WHOAMI,
+	MPIC_IDX_CPU_INTACK,
+	MPIC_IDX_CPU_EOI,
+	MPIC_IDX_CPU_MCACK,
+
+	MPIC_IDX_IRQ_BASE,
+	MPIC_IDX_IRQ_STRIDE,
+	MPIC_IDX_IRQ_VECTOR_PRI,
+
+	MPIC_IDX_VECPRI_VECTOR_MASK,
+	MPIC_IDX_VECPRI_POLARITY_POSITIVE,
+	MPIC_IDX_VECPRI_POLARITY_NEGATIVE,
+	MPIC_IDX_VECPRI_SENSE_LEVEL,
+	MPIC_IDX_VECPRI_SENSE_EDGE,
+	MPIC_IDX_VECPRI_POLARITY_MASK,
+	MPIC_IDX_VECPRI_SENSE_MASK,
+	MPIC_IDX_IRQ_DESTINATION,
+	MPIC_IDX_END
+};
+
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+/* Fixup table entry */
+struct mpic_irq_fixup
+{
+	u8 __iomem	*base;
+	u8 __iomem	*applebase;
+	u32		data;
+	unsigned int	index;
+};
+#endif /* CONFIG_MPIC_U3_HT_IRQS */
+
+
+enum mpic_reg_type {
+	mpic_access_mmio_le,
+	mpic_access_mmio_be,
+#ifdef CONFIG_PPC_DCR
+	mpic_access_dcr
+#endif
+};
+
+struct mpic_reg_bank {
+	u32 __iomem	*base;
+#ifdef CONFIG_PPC_DCR
+	dcr_host_t	dhost;
+#endif /* CONFIG_PPC_DCR */
+};
+
+struct mpic_irq_save {
+	u32		vecprio,
+			dest;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+	u32		fixup_data;
+#endif
+};
+
+/* The instance data of a given MPIC */
+struct mpic
+{
+	/* The OpenFirmware dt node for this MPIC */
+	struct device_node *node;
+
+	/* The remapper for this MPIC */
+	struct irq_domain	*irqhost;
+
+	/* The "linux" controller struct */
+	struct irq_chip		hc_irq;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+	struct irq_chip		hc_ht_irq;
+#endif
+#ifdef CONFIG_SMP
+	struct irq_chip		hc_ipi;
+#endif
+	struct irq_chip		hc_tm;
+	struct irq_chip		hc_err;
+	const char		*name;
+	/* Flags */
+	unsigned int		flags;
+	/* How many irq sources in a given ISU */
+	unsigned int		isu_size;
+	unsigned int		isu_shift;
+	unsigned int		isu_mask;
+	/* Number of sources */
+	unsigned int		num_sources;
+
+	/* vector numbers used for internal sources (ipi/timers) */
+	unsigned int		ipi_vecs[4];
+	unsigned int		timer_vecs[8];
+	/* vector numbers used for FSL MPIC error interrupts */
+	unsigned int		err_int_vecs[MPIC_MAX_ERR];
+
+	/* Spurious vector to program into unused sources */
+	unsigned int		spurious_vec;
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+	/* The fixup table */
+	struct mpic_irq_fixup	*fixups;
+	raw_spinlock_t	fixup_lock;
+#endif
+
+	/* Register access method */
+	enum mpic_reg_type	reg_type;
+
+	/* The physical base address of the MPIC */
+	phys_addr_t paddr;
+
+	/* The various ioremap'ed bases */
+	struct mpic_reg_bank	thiscpuregs;
+	struct mpic_reg_bank	gregs;
+	struct mpic_reg_bank	tmregs;
+	struct mpic_reg_bank	cpuregs[MPIC_MAX_CPUS];
+	struct mpic_reg_bank	isus[MPIC_MAX_ISU];
+
+	/* ioremap'ed base for error interrupt registers */
+	u32 __iomem	*err_regs;
+
+	/* Protected sources */
+	unsigned long		*protected;
+
+#ifdef CONFIG_MPIC_WEIRD
+	/* Pointer to HW info array */
+	u32			*hw_set;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+	struct msi_bitmap	msi_bitmap;
+#endif
+
+#ifdef CONFIG_MPIC_BROKEN_REGREAD
+	u32			isu_reg0_shadow[MPIC_MAX_IRQ_SOURCES];
+#endif
+
+	/* link */
+	struct mpic		*next;
+
+#ifdef CONFIG_PM
+	struct mpic_irq_save	*save_data;
+#endif
+};
+
+extern struct bus_type mpic_subsys;
+
+/*
+ * MPIC flags (passed to mpic_alloc)
+ *
+ * The top 4 bits contain an MPIC HW id that is used to index the
+ * register offsets and some masks when CONFIG_MPIC_WEIRD is set.
+ * Not setting any ID (leaving those bits at 0) selects the standard MPIC.
+ */
+
+/*
+ * This is a secondary ("chained") controller; it only uses the CPU0
+ * registers.  Primary controllers have IPIs and affinity control.
+ */
+#define MPIC_SECONDARY			0x00000001
+
+/* Set this for a big-endian MPIC */
+#define MPIC_BIG_ENDIAN			0x00000002
+/* Broken U3 MPIC */
+#define MPIC_U3_HT_IRQS			0x00000004
+/* Broken IPI registers (autodetected) */
+#define MPIC_BROKEN_IPI			0x00000008
+/* Spurious vector requires EOI */
+#define MPIC_SPV_EOI			0x00000020
+/* No passthrough disable */
+#define MPIC_NO_PTHROU_DIS		0x00000040
+/* DCR based MPIC */
+#define MPIC_USES_DCR			0x00000080
+/* MPIC has 11-bit vector fields (or larger) */
+#define MPIC_LARGE_VECTORS		0x00000100
+/* Enable delivery of prio 15 interrupts as MCK instead of EE */
+#define MPIC_ENABLE_MCK			0x00000200
+/* Disable bias among target selection, spread interrupts evenly */
+#define MPIC_NO_BIAS			0x00000400
+/* Destination only supports a single CPU at a time */
+#define MPIC_SINGLE_DEST_CPU		0x00001000
+/* Enable CoreInt delivery of interrupts */
+#define MPIC_ENABLE_COREINT		0x00002000
+/* Do not reset the MPIC during initialization */
+#define MPIC_NO_RESET			0x00004000
+/* Freescale MPIC (compatible includes "fsl,mpic") */
+#define MPIC_FSL			0x00008000
+/* Freescale MPIC supports EIMR (error interrupt mask register).
+ * This flag is set for MPIC version >= 4.1 (version determined
+ * from the BRR1 register).
+ */
+#define MPIC_FSL_HAS_EIMR		0x00010000
+
+/* MPIC HW modification ID */
+#define MPIC_REGSET_MASK		0xf0000000
+#define MPIC_REGSET(val)		(((val) & 0xf) << 28)
+#define MPIC_GET_REGSET(flags)		(((flags) >> 28) & 0xf)
+
+#define	MPIC_REGSET_STANDARD		MPIC_REGSET(0)	/* Original MPIC */
+#define	MPIC_REGSET_TSI108		MPIC_REGSET(1)	/* Tsi108/109 PIC */
+
+/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
+extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+	return 0;
+}
+#endif
+
+/* Allocate the controller structure and set up the linux irq descs
+ * for the range of interrupts passed in. No HW initialization is
+ * actually performed.
+ *
+ * @node:	device-tree node of the MPIC, or NULL to look it up
+ * @phys_addr:	physical base address of the MPIC
+ * @flags:	flags, see constants above
+ * @isu_size:	number of interrupts in an ISU. Use 0 to use a
+ *              standard ISU-less setup (aka powermac)
+ * @irq_count:	number of irq sources to use. Pass 0 to match the
+ *		number of sources
+ * @name:	name of the controller, used in diagnostics
+ *
+ * Note on senses: by default all interrupts are set up as level
+ * negative, unless MPIC_U3_HT_IRQS is set, in which case they are
+ * edge positive.
+ */
+extern struct mpic *mpic_alloc(struct device_node *node,
+			       phys_addr_t phys_addr,
+			       unsigned int flags,
+			       unsigned int isu_size,
+			       unsigned int irq_count,
+			       const char *name);
+
+/* Assign ISUs, to call before mpic_init()
+ *
+ * @mpic:	controller structure as returned by mpic_alloc()
+ * @isu_num:	ISU number
+ * @phys_addr:	physical address of the ISU
+ */
+extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+			    phys_addr_t phys_addr);
+
+
+/* Initialize the controller. After this has been called, none of the above
+ * should be called again for this mpic
+ */
+extern void mpic_init(struct mpic *mpic);
+
+/*
+ * All of the following functions must only be used after the
+ * ISUs have been assigned and the controller fully initialized
+ * with mpic_init()
+ */
+
+
+/* Change the priority of an interrupt. Default is 8 for irqs and
+ * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but for
+ * an IPI you pass the linux irq number that the IPI is mapped to.
+ */
+extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
+
+/* Setup a non-boot CPU */
+extern void mpic_setup_this_cpu(void);
+
+/* Clean up for kexec (or cpu offline or ...) */
+extern void mpic_teardown_this_cpu(int secondary);
+
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
+/* Request IPIs on primary mpic */
+extern void mpic_request_ipis(void);
+
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
+
+/* Unmask a specific virq */
+extern void mpic_unmask_irq(struct irq_data *d);
+/* Mask a specific virq */
+extern void mpic_mask_irq(struct irq_data *d);
+/* EOI a specific virq */
+extern void mpic_end_irq(struct irq_data *d);
+
+/* Fetch interrupt from a given mpic */
+extern unsigned int mpic_get_one_irq(struct mpic *mpic);
+/* This one gets from the primary mpic */
+extern unsigned int mpic_get_irq(void);
+/* This one gets from the primary mpic via CoreInt */
+extern unsigned int mpic_get_coreint_irq(void);
+/* Fetch Machine Check interrupt from primary mpic */
+extern unsigned int mpic_get_mcirq(void);
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_MPIC_H */
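
The expected call sequence documented above is mpic_alloc(), then
mpic_assign_isu() for each ISU (if any), then mpic_init().  A minimal
sketch follows (not part of the patch; the flags, source count and name
string are illustrative, and myboard_init_IRQ is an invented name).

#include <linux/bug.h>
#include <linux/init.h>
#include <asm/mpic.h>

static void __init myboard_init_IRQ(void)
{
	struct mpic *mpic;

	/* NULL node: mpic_alloc() finds the PIC in the device tree.
	 * isu_size 0 selects the standard ISU-less layout. */
	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,
			  0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);
}
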
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
new file mode 100644
index 0000000..088420d
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifndef _ASM_MPIC_MSGR_H
+#define _ASM_MPIC_MSGR_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/smp.h>
+#include <asm/io.h>
+
+struct mpic_msgr {
+	u32 __iomem *base;
+	u32 __iomem *mer;
+	int irq;
+	unsigned char in_use;
+	raw_spinlock_t lock;
+	int num;
+};
+
+/* Get a message register
+ *
+ * @reg_num:	the MPIC message register to get
+ *
+ * A pointer to the message register is returned.  If the message
+ * register asked for is already in use, an ERR_PTR() encoding -EBUSY
+ * is returned.  If the number given is not associated with an actual
+ * message register, an ERR_PTR() encoding -ENODEV is returned.
+ * Successfully getting the register marks it as in use.
+ */
+extern struct mpic_msgr *mpic_msgr_get(unsigned int reg_num);
+
+/* Relinquish a message register
+ *
+ * @msgr:	the message register to return
+ *
+ * Disables the given message register and marks it as free.
+ * After this call has completed successfully the message
+ * register is available to be acquired by a call to
+ * mpic_msgr_get.
+ */
+extern void mpic_msgr_put(struct mpic_msgr *msgr);
+
+/* Enable a message register
+ *
+ * @msgr:	the message register to enable
+ *
+ * The given message register is enabled for sending
+ * messages.
+ */
+extern void mpic_msgr_enable(struct mpic_msgr *msgr);
+
+/* Disable a message register
+ *
+ * @msgr:	the message register to disable
+ *
+ * The given message register is disabled for sending
+ * messages.
+ */
+extern void mpic_msgr_disable(struct mpic_msgr *msgr);
+
+/* Write a message to a message register
+ *
+ * @msgr:	the message register to write to
+ * @message:	the message to write
+ *
+ * The given 32-bit message is written to the given message
+ * register.  Writing to an enabled message register fires
+ * an interrupt.
+ */
+static inline void mpic_msgr_write(struct mpic_msgr *msgr, u32 message)
+{
+	out_be32(msgr->base, message);
+}
+
+/* Read a message from a message register
+ *
+ * @msgr:	the message register to read from
+ *
+ * Returns the 32-bit value currently in the given message register.
+ * Upon reading the register any interrupts for that register are
+ * cleared.
+ */
+static inline u32 mpic_msgr_read(struct mpic_msgr *msgr)
+{
+	return in_be32(msgr->base);
+}
+
+/* Clear a message register
+ *
+ * @msgr:	the message register to clear
+ *
+ * Clears any interrupts associated with the given message register.
+ */
+static inline void mpic_msgr_clear(struct mpic_msgr *msgr)
+{
+	(void) mpic_msgr_read(msgr);
+}
+
+/* Set the destination CPU for the message register
+ *
+ * @msgr:	the message register whose destination is to be set
+ * @cpu_num:	the Linux CPU number to bind the message register to
+ *
+ * Note that the CPU number given is the CPU number used by the kernel
+ * and *not* the actual hardware CPU number.
+ */
+static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
+					     u32 cpu_num)
+{
+	out_be32(msgr->base, 1 << get_hard_smp_processor_id(cpu_num));
+}
+
+/* Get the IRQ number for the message register
+ * @msgr:	the message register whose IRQ is to be returned
+ *
+ * Returns the IRQ number associated with the given message register.
+ * 0 is returned if this message register is not capable of receiving
+ * interrupts.  Which message registers can and cannot receive interrupts is
+ * specified in the device tree for the system.
+ */
+static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
+{
+	return msgr->irq;
+}
+
+#endif
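
A usage sketch (not part of the patch): acquire register 0, aim it at
CPU 1, enable it, and write a message.  send_hello() is an invented name;
mpic_msgr_get() reports errors via ERR_PTR(), as noted above.

#include <linux/err.h>
#include <asm/mpic_msgr.h>

static int send_hello(void)
{
	struct mpic_msgr *msgr = mpic_msgr_get(0);

	if (IS_ERR(msgr))
		return PTR_ERR(msgr);

	mpic_msgr_set_destination(msgr, 1);	/* Linux CPU number */
	mpic_msgr_enable(msgr);
	mpic_msgr_write(msgr, 0x48454c4f);	/* fires an interrupt on CPU 1 */
	return 0;
}
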
diff --git a/arch/powerpc/include/asm/mpic_timer.h b/arch/powerpc/include/asm/mpic_timer.h
new file mode 100644
index 0000000..13e6702
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_timer.h
@@ -0,0 +1,46 @@
+/*
+ * arch/powerpc/include/asm/mpic_timer.h
+ *
+ * Header file for the MPIC Global Timer
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Wang Dongsheng <Dongsheng.Wang@freescale.com>
+ *	   Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MPIC_TIMER__
+#define __MPIC_TIMER__
+
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+struct mpic_timer {
+	void			*dev;
+	struct cascade_priv	*cascade_handle;
+	unsigned int		num;
+	unsigned int		irq;
+};
+
+#ifdef CONFIG_MPIC_TIMER
+struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
+		time64_t time);
+void mpic_start_timer(struct mpic_timer *handle);
+void mpic_stop_timer(struct mpic_timer *handle);
+void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time);
+void mpic_free_timer(struct mpic_timer *handle);
+#else
+static inline struct mpic_timer *mpic_request_timer(irq_handler_t fn,
+		void *dev, time64_t time) { return NULL; }
+static inline void mpic_start_timer(struct mpic_timer *handle) { }
+static inline void mpic_stop_timer(struct mpic_timer *handle) { }
+static inline void mpic_get_remain_time(struct mpic_timer *handle,
+		time64_t *time) { }
+static inline void mpic_free_timer(struct mpic_timer *handle) { }
+#endif
+
+#endif
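
A usage sketch (not part of the patch): request a global timer, start it,
and handle its expiry.  The function names and the 30-second period are
illustrative.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/mpic_timer.h>

static irqreturn_t my_timer_handler(int irq, void *dev)
{
	/* called when the global timer expires */
	return IRQ_HANDLED;
}

static int arm_timer(void *dev)
{
	struct mpic_timer *t = mpic_request_timer(my_timer_handler, dev, 30);

	if (!t)
		return -ENODEV;

	mpic_start_timer(t);	/* my_timer_handler runs in ~30 seconds */
	return 0;
}
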
diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h
new file mode 100644
index 0000000..1ec7125
--- /dev/null
+++ b/arch/powerpc/include/asm/msi_bitmap.h
@@ -0,0 +1,36 @@
+#ifndef _POWERPC_SYSDEV_MSI_BITMAP_H
+#define _POWERPC_SYSDEV_MSI_BITMAP_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/of.h>
+#include <asm/irq.h>
+
+struct msi_bitmap {
+	struct device_node	*of_node;
+	unsigned long		*bitmap;
+	spinlock_t		lock;
+	unsigned int		irq_count;
+	bool		 	bitmap_from_slab;
+};
+
+int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
+void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
+			    unsigned int num);
+void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq);
+
+int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp);
+
+int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
+		     struct device_node *of_node);
+void msi_bitmap_free(struct msi_bitmap *bmp);
+
+#endif /* _POWERPC_SYSDEV_MSI_BITMAP_H */
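
A usage sketch (not part of the patch): set up a bitmap, carve one
hardware IRQ out of it, and release everything again.  The 256-entry size
is an arbitrary example.

#include <asm/msi_bitmap.h>

static int msi_bitmap_demo(struct device_node *of_node)
{
	struct msi_bitmap bmp;
	int hwirq, rc;

	rc = msi_bitmap_alloc(&bmp, 256, of_node);
	if (rc)
		return rc;

	hwirq = msi_bitmap_alloc_hwirqs(&bmp, 1);
	if (hwirq < 0) {
		msi_bitmap_free(&bmp);
		return hwirq;
	}

	/* ... program hwirq into the MSI hardware, use it, then: ... */

	msi_bitmap_free_hwirqs(&bmp, hwirq, 1);
	msi_bitmap_free(&bmp);
	return 0;
}
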
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
new file mode 100644
index 0000000..bd9ba8d
--- /dev/null
+++ b/arch/powerpc/include/asm/nmi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_NMI_H
+#define _ASM_NMI_H
+
+#ifdef CONFIG_PPC_WATCHDOG
+extern void arch_touch_nmi_watchdog(void);
+#else
+static inline void arch_touch_nmi_watchdog(void) {}
+#endif
+
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
+#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
new file mode 100644
index 0000000..8825953
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGALLOC_32_H
+#define _ASM_POWERPC_PGALLOC_32_H
+
+#include <linux/threads.h>
+#include <linux/slab.h>
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+/* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) 		do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)		do { } while (0)
+/* #define pgd_populate(mm, pmd, pte)      BUG() */
+
+#ifndef CONFIG_BOOKE
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+				       pte_t *pte)
+{
+	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pte_page)
+{
+	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
+		      _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#else
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+				       pte_t *pte)
+{
+	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pte_page)
+{
+	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+	if (!index_size) {
+		pgtable_page_dtor(virt_to_page(table));
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
+}
+
+#define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+				    void *table, int shift)
+{
+	unsigned long pgf = (unsigned long)table;
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+				    void *table, int shift)
+{
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	tlb_flush_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, page_address(table), 0);
+}
+#endif /* _ASM_POWERPC_PGALLOC_32_H */
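
The MAX_PGTABLE_INDEX_SIZE comment above describes a pointer-packing
trick: because every page table is at least 16-byte aligned,
pgtable_free_tlb() can stash the index size in the low four bits of the
pointer and __tlb_remove_table() can mask it back out.  A worked sketch
(not part of the patch) of that round trip:

#include <linux/bug.h>

static void packing_demo(void *table, unsigned int shift)
{
	/* pack: valid because shift <= 0xf and table is 16-byte aligned */
	unsigned long pgf = (unsigned long)table | shift;

	/* unpack, exactly as __tlb_remove_table() does */
	void *t = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
	unsigned int s = pgf & MAX_PGTABLE_INDEX_SIZE;

	BUG_ON(t != table || s != shift);
}
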
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
new file mode 100644
index 0000000..a507a65
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/asm-405.h>
+
+extern unsigned long ioremap_bot;
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS	0UL
+
+#define pte_ERROR(e) \
+	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+		(unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP	PKMAP_BASE
+#else
+#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP	KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from IOREMAP_TOP being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END	ioremap_bot
+
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#if defined(CONFIG_40x)
+#include <asm/nohash/32/pte-40x.h>
+#elif defined(CONFIG_44x)
+#include <asm/nohash/32/pte-44x.h>
+#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-book3e.h>
+#elif defined(CONFIG_FSL_BOOKE)
+#include <asm/nohash/32/pte-fsl-booke.h>
+#elif defined(CONFIG_PPC_8xx)
+#include <asm/nohash/32/pte-8xx.h>
+#endif
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+#ifndef __ASSEMBLY__
+
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~0, 0); } while (0)
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
+#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	*pmdp = __pmd(0);
+}
+
+
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bits wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+				       unsigned long clr,
+				       unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+	unsigned long old, tmp;
+
+	__asm__ __volatile__("\
+1:	lwarx	%0,0,%3\n\
+	andc	%1,%0,%4\n\
+	or	%1,%1,%5\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%1,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+	unsigned long old = pte_val(*p);
+	*p = __pte((old & ~clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
+		icache_44x_need_flush = 1;
+#endif
+	return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+					    unsigned long clr,
+					    unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+	unsigned long long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__("\
+1:	lwarx	%L0,0,%4\n\
+	lwzx	%0,0,%3\n\
+	andc	%1,%L0,%5\n\
+	or	%1,%1,%6\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%1,0,%4\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+	unsigned long long old = pte_val(*p);
+	*p = __pte((old & ~(unsigned long long)clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
+		icache_44x_need_flush = 1;
+#endif
+	return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+	old = pte_update(ptep, _PAGE_ACCESSED, 0);
+	return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep)
+{
+	return __pte(pte_update(ptep, ~0, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
+{
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   int psize)
+{
+	unsigned long set = pte_val(entry) &
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
+
+	pte_update(ptep, clr, set);
+
+	flush_tlb_page(vma, address);
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd)	\
+	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)		\
+	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd)	\
+	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)		\
+	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)		\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)	\
+	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+				  pte_index(addr))
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)		kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit.
+ *   -- paulus
+ */
+#define __swp_type(entry)		((entry).val & 0x1f)
+#define __swp_offset(entry)		((entry).val >> 5)
+#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
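
A worked sketch (not part of the patch) of the swap encoding above: the
type occupies the low 5 bits of the entry, the offset sits above it, and
the PTE form shifts everything up by 3 so the low (present) PTE bits stay
clear.  swp_encoding_demo() is an invented name.

#include <linux/bug.h>
#include <linux/mm_types.h>
#include <asm/pgtable.h>

static void swp_encoding_demo(void)
{
	swp_entry_t e = __swp_entry(3, 100);	/* val = 3 | (100 << 5) */
	pte_t p = __swp_entry_to_pte(e);	/* pte_val(p) = e.val << 3 */

	/* the round trip preserves both fields */
	BUG_ON(__swp_type(e) != 3 || __swp_offset(e) != 100);
	BUG_ON(__pte_to_swp_entry(p).val != e.val);
}
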
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
new file mode 100644
index 0000000..bb4b3a4
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#ifdef __KERNEL__
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ *
+ * There are several potential gotchas here.  The 40x hardware TLBLO
+ * field looks like this:
+ *
+ * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ *   support down to 1k pages), this is done in the TLBMiss exception
+ *   handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
+ *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
+ *   zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ *   entries use the top 30 bits.  Because 40x doesn't support SMP
+ *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
+ *   is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ *   software PTE bits.  We actually use bits 21, 24, 25, and
+ *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ *   PRESENT.
+ */
+
+#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
+#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
+#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
+#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
+#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
+#define	_PAGE_SPECIAL	0x020	/* software: Special page */
+#define	_PAGE_RW	0x040	/* software: Writes permitted */
+#define	_PAGE_DIRTY	0x080	/* software: dirty page */
+#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
+#define _PAGE_EXEC	0x200	/* hardware: EX permission */
+#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
+
+#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
+#define _PMD_BAD	0x802
+#define _PMD_SIZE_4M	0x0c0
+#define _PMD_SIZE_16M	0x0e0
+
+/* Until my rework is finished, 40x still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES	1
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
new file mode 100644
index 0000000..f812c02
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing.  I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN.  This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * This isn't entirely true anymore, at least some bits are now
+ * easier to move into the TLB from the PTE. -BenH.
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ *   TLB1:
+ *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   RPN.................................  -  -  -  -  -  - ERPN.......
+ *
+ *   TLB2:
+ *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
+ *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attribute fields. Those are:
+ *
+ *   TLB2:
+ *   0...10    11   12   13   14   15   16...31
+ *   no change WL1  IL1I IL1D IL2I IL2D no change
+ *
+ * There are some constraints and options when deciding how to map
+ * software bits into a TLB entry.
+ *
+ *   - PRESENT *must* be in the bottom three bits because swap cache
+ *     entries use the top 29 bits for TLB2.
+ *
+ *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ *     because it doesn't support SMP. However, some later 460 variants
+ *     have -some- form of SMP support and so I keep the bit there for
+ *     future use
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
+ * above bits.  Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry can hold an arch-dependent swp_entry structure
+ * under certain situations.  In other words, in such situations some
+ * portion of the PTE bits is used as a swp_entry.  In the PPC
+ * implementation, the 3rd-24th LSBs are shared with the swp_entry,
+ * while the three least significant bits (0-2) still hold protection
+ * values, for both the PTE and the SWAP entry.
+ *
+ * The protection bits that must stay in those low bits are:
+ *	_PAGE_PRESENT
+ *	_PAGE_HASHPTE (if the HW has it)
+ *
+ * So those bits have to be inside the 0-2nd LSBs of the PTE.
+ *
+ */
+
+#define _PAGE_PRESENT	0x00000001		/* S: PTE valid */
+#define _PAGE_RW	0x00000002		/* S: Write permission */
+#define _PAGE_EXEC	0x00000004		/* H: Execute permission */
+#define _PAGE_ACCESSED	0x00000008		/* S: Page referenced */
+#define _PAGE_DIRTY	0x00000010		/* S: Page dirty */
+#define _PAGE_SPECIAL	0x00000020		/* S: Special page */
+#define _PAGE_USER	0x00000040		/* S: User page */
+#define _PAGE_ENDIAN	0x00000080		/* H: E bit */
+#define _PAGE_GUARDED	0x00000100		/* H: G bit */
+#define _PAGE_COHERENT	0x00000200		/* H: M bit */
+#define _PAGE_NO_CACHE	0x00000400		/* H: I bit */
+#define _PAGE_WRITETHRU	0x00000800		/* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT	0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD	(~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK	0xffffffff00000000ULL
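+
+/*
+ * Illustrative consequence: pte_none() in <asm/nohash/pgtable.h> tests
+ * (pte_val(pte) & ~_PTE_NONE_MASK) == 0, so only the low word of the
+ * PTE decides emptiness.  For example a PTE whose value is just a
+ * stale ERPN bit, __pte(0x1ULL << 32), is still pte_none().
+ */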
+
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
new file mode 100644
index 0000000..f04cb46
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access.  The TLB does not have accessed or write
+ * protect bits.  We assume that if the TLB gets loaded with an entry it is
+ * accessed, and overload the changed bit for write protect.  We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators.  Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded.  We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added.  In addition to the base page size (4K or
+ * 16K), we support 512K and 8M large pages (see _PMD_PAGE_* below).
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC.  The 8M pages are only used for kernel
+ * mapping of well known areas.  The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT	0x0001	/* Page is valid */
+#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
+#define _PAGE_PRIVILEGED	0x0004	/* No ASID (context) compare */
+#define _PAGE_HUGE	0x0008	/* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
+#define _PAGE_DIRTY	0x0100	/* C: page changed */
+
+/* These 4 software bits must be masked out when the L2 entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_GUARDED	0x0010	/* Copied to L1 G entry in DTLB */
+#define _PAGE_SPECIAL	0x0020	/* SW entry */
+#define _PAGE_EXEC	0x0040	/* Copied to PP (bit 21) in ITLB */
+#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
+
+#define _PAGE_NA	0x0200	/* Supervisor NA, User no access */
+#define _PAGE_RO	0x0600	/* Supervisor RO, User no access */
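+
+/*
+ * For illustration (assuming the usual <asm/pte-common.h> fallback
+ * that defines _PAGE_RW as 0 when a platform provides _PAGE_RO):
+ * the generic pte_wrprotect() in <asm/nohash/pgtable.h> ORs in
+ * _PAGE_RO, so a write-protected PTE on 8xx ends up with the 0x0600
+ * "Supervisor RO, User no access" encoding above.
+ */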
+
+#define _PMD_PRESENT	0x0001
+#define _PMD_BAD	0x0fd0
+#define _PMD_PAGE_MASK	0x000c
+#define _PMD_PAGE_8M	0x000c
+#define _PMD_PAGE_512K	0x0004
+#define _PMD_USER	0x0020	/* APG 1 */
+
+/* Until my rework is finished, 8xx still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES	1
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define _PAGE_PSIZE	_PAGE_HUGE
+#endif
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
new file mode 100644
index 0000000..d1ee24e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
+ * processors
+ *
+   MMU Assist Register 3:
+
+   32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
+
+   - PRESENT *must* be in the bottom three bits because swap cache
+     entries use the top 29 bits.
+
+*/
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT	0x00001	/* S: PTE contains a translation */
+#define _PAGE_USER	0x00002	/* S: User page (maps to UR) */
+#define _PAGE_RW	0x00004	/* S: Write permission (SW) */
+#define _PAGE_DIRTY	0x00008	/* S: Page dirty */
+#define _PAGE_EXEC	0x00010	/* H: SX permission */
+#define _PAGE_ACCESSED	0x00020	/* S: Page referenced */
+
+#define _PAGE_ENDIAN	0x00040	/* H: E bit */
+#define _PAGE_GUARDED	0x00080	/* H: G bit */
+#define _PAGE_COHERENT	0x00100	/* H: M bit */
+#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
+#define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+#define _PAGE_SPECIAL	0x00800 /* S: Special page */
+
+#define _PMD_PRESENT	0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD	(~PAGE_MASK)
+
+#define PTE_WIMGE_SHIFT (6)
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
new file mode 100644
index 0000000..777d62e
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/slice.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
+#define _ASM_POWERPC_NOHASH_32_SLICE_H
+
+#ifdef CONFIG_PPC_MM_SLICES
+
+#define SLICE_LOW_SHIFT		26	/* 64 slices */
+#define SLICE_LOW_TOP		(0x100000000ull)
+#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
+
+#define SLICE_HIGH_SHIFT	0
+#define SLICE_NUM_HIGH		0ul
+#define GET_HIGH_SLICE_INDEX(addr)	((addr) & 0)
+
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
new file mode 100644
index 0000000..e2d62d0
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -0,0 +1,187 @@
+#ifndef _ASM_POWERPC_PGALLOC_64_H
+#define _ASM_POWERPC_PGALLOC_64_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+extern struct vmemmap_backing *vmemmap_list;
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  PTE pages (which are linked to a struct page
+ * for now) are drawn from the main get_free_pages() pool and use an
+ * index_size of 0; for the other levels the allocation size is
+ * (2^index_size * sizeof(pointer)) and allocations are drawn from the
+ * kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
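+
+/*
+ * Minimal usage sketch, mirroring pgd_alloc()/pgd_free() below: a
+ * table of 2^shift pointers comes from the matching cache,
+ *
+ *	void *table = kmem_cache_alloc(PGT_CACHE(shift), GFP_KERNEL);
+ *	...
+ *	kmem_cache_free(PGT_CACHE(shift), table);
+ *
+ * while shift == 0 trips the BUG_ON because PTE pages come from
+ * get_free_pages() instead.
+ */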
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, (unsigned long)PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_set(pud, (unsigned long)pmd);
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	pmd_set(pmd, (unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				pgtable_t pte_page)
+{
+	pmd_set(pmd, (unsigned long)page_address(pte_page));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+				      unsigned long address)
+{
+	struct page *page;
+	pte_t *pte;
+
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+	if (!pte)
+		return NULL;
+	page = virt_to_page(pte);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, int shift)
+{
+	if (!shift) {
+		pgtable_page_dtor(virt_to_page(table));
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+
+#define get_hugepd_cache_index(x)	(x)
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
+}
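+
+/*
+ * Worked example of the encoding used by the two helpers above:
+ * pagetables are aligned so that their low bits are zero, and the
+ * index shift travels in those bits,
+ *
+ *	pgf   = (unsigned long)pmd | PMD_CACHE_INDEX;
+ *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
+ *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;
+ *
+ * recovering the original pmd pointer and PMD_CACHE_INDEX.
+ */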
+
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	tlb_flush_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, page_address(table), 0);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr)		      \
+	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
+#ifndef CONFIG_PPC_64K_PAGES
+#define __pud_free_tlb(tlb, pud, addr)		      \
+	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#define check_pgt_cache()	do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_64_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
new file mode 100644
index 0000000..c40ec32
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/5level-fixup.h>
+
+/*
+ * Entries per page directory level.  The PTE level must use a 64b record
+ * for each page table entry.  The PMD and PGD levels use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE  9
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  9
+#define PGD_INDEX_SIZE  9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
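+
+/*
+ * With 4K pages (PAGE_SHIFT = 12) the values above work out to
+ * PMD_SHIFT = 21 (2M mapped per PMD entry), PUD_SHIFT = 28 (256M per
+ * PUD entry) and PGDIR_SHIFT = 37 (128G per PGD entry), for a 46-bit
+ * effective address space overall.
+ */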
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS		0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS		0
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
+
+
+/*
+ * 4-level page tables related bits
+ */
+
+#define pgd_none(pgd)		(!pgd_val(pgd))
+#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
+#define pgd_present(pgd)	(pgd_val(pgd) != 0)
+#define pgd_page_vaddr(pgd)	(pgd_val(pgd) & ~PGD_MASKED_BITS)
+
+#ifndef __ASSEMBLY__
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	*pgdp = __pgd(0);
+}
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+	return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+	return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#endif /* !__ASSEMBLY__ */
+
+#define pud_offset(pgdp, addr)	\
+  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
+#define pud_ERROR(e) \
+	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
+ */
+#define remap_4k_pfn(vma, addr, pfn, prot)	\
+	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
new file mode 100644
index 0000000..7cd6809
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 non-hashed page table.
+ */
+
+#include <asm/nohash/64/pgtable-4k.h>
+#include <asm/barrier.h>
+#include <asm/asm-const.h>
+
+#ifdef CONFIG_PPC_64K_PAGES
+#error "Page size not supported"
+#endif
+
+#define FIRST_USER_ADDRESS	0UL
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
+
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define VMALLOC_START	KERN_VIRT_START
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
+#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * The second half of the kernel virtual space is used for IO mappings;
+ * it is itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define FULL_IO_SIZE	0x80000000ul
+#define  ISA_IO_BASE	(KERN_IO_START)
+#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
+#define  PHB_IO_BASE	(ISA_IO_END)
+#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
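+
+/*
+ * Concretely, with KERN_VIRT_SIZE = 16T: vmalloc covers the first 4T,
+ * the virtual memmap the next 4T (see VMEMMAP_BASE below), and the IO
+ * region occupies the upper half starting at
+ * KERN_IO_START = 0x8000080000000000.
+ */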
+
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT		60UL
+#define REGION_MASK		(0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
+#define USER_REGION_ID		(0UL)
+
+/*
+ * Defines the address of the vmemmap area, in its own region
+ * after the vmalloc space on Book3E
+ */
+#define VMEMMAP_BASE		VMALLOC_END
+#define VMEMMAP_END		KERN_IO_START
+#define vmemmap			((struct page *)VMEMMAP_BASE)
+
+
+/*
+ * Include the PTE bits definitions
+ */
+#include <asm/nohash/pte-book3e.h>
+#include <asm/pte-common.h>
+
+#ifndef __ASSEMBLY__
+/* pte_clear moved to later in this file */
+
+#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+	*pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	*pmdp = __pmd(0);
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte(pmd_val(pmd));
+}
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define	pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
+				 || (pmd_val(pmd) & PMD_BAD_BITS))
+#define	pmd_present(pmd)	(!pmd_none(pmd))
+#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
+extern struct page *pmd_page(pmd_t pmd);
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+	*pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+	*pudp = __pud(0);
+}
+
+#define pud_none(pud)		(!pud_val(pud))
+#define	pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
+				 || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_present(pud)	(pud_val(pud) != 0)
+#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
+
+extern struct page *pud_page(pud_t pud);
+
+static inline pte_t pud_pte(pud_t pud)
+{
+	return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+	return __pud(pte_val(pte));
+}
+#define pud_write(pud)		pte_write(pud_pte(pud))
+#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
+
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+	*pgdp = __pgd(val);
+}
+
+/*
+ * Find an entry in a page-table-directory.  We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+
+#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+  (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+#define pte_offset_kernel(dir,addr) \
+  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)			do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+				       unsigned long addr,
+				       pte_t *ptep, unsigned long clr,
+				       unsigned long set,
+				       int huge)
+{
+#ifdef PTE_ATOMIC_UPDATES
+	unsigned long old, tmp;
+
+	__asm__ __volatile__(
+	"1:	ldarx	%0,0,%3		# pte_update\n\
+	andc	%1,%0,%4 \n\
+	or	%1,%1,%6\n\
+	stdcx.	%1,0,%3 \n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+	: "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
+	: "cc" );
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte((old & ~clr) | set);
+#endif
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
+	return old;
+}
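+
+/*
+ * Usage sketch: the accessors below all funnel through pte_update().
+ * For instance, clearing the referenced bit while leaving everything
+ * else intact is just
+ *
+ *	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ *	referenced = (old & _PAGE_ACCESSED) != 0;
+ *
+ * which is exactly what __ptep_test_and_clear_young() does.
+ */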
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+
+	if (!pte_young(*ptep))
+		return 0;
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+	return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
+({									   \
+	int __r;							   \
+	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+	__r;								   \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
+{
+	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+		return;
+
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+		return;
+
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)		\
+({									\
+	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+						  __ptep);		\
+	__young;							\
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
+{
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+	return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t * ptep)
+{
+	pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   int psize)
+{
+	unsigned long bits = pte_val(entry) &
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+
+#ifdef PTE_ATOMIC_UPDATES
+	unsigned long old, tmp;
+
+	__asm__ __volatile__(
+	"1:	ldarx	%0,0,%4\n\
+		or	%0,%3,%0\n\
+		stdcx.	%0,0,%4\n\
+		bne-	1b"
+	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+	:"r" (bits), "r" (ptep), "m" (*ptep)
+	:"cc");
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte(old | bits);
+#endif
+
+	flush_tlb_page(vma, address);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)
+
+#define pte_ERROR(e) \
+	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+	} while (0)
+/*
+ * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT;
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
+				& ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset)	((swp_entry_t) { \
+					((type) << _PAGE_BIT_SWAP_TYPE) \
+					| ((offset) << PTE_RPN_SHIFT) })
+
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x)		__pte((x).val)
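+
+/*
+ * Worked example with the Book3E values from
+ * <asm/nohash/pte-book3e.h> (_PAGE_BIT_SWAP_TYPE = 2 and, for 4K
+ * pages, PTE_RPN_SHIFT = 24): __swp_entry(type, offset) packs to
+ * (type << 2) | (offset << 24), leaving bit 0 clear so a swap entry
+ * can never look _PAGE_PRESENT.
+ */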
+
+extern int map_kernel_page(unsigned long ea, unsigned long pa,
+			   unsigned long flags);
+extern int __meminit vmemmap_create_mapping(unsigned long start,
+					    unsigned long page_size,
+					    unsigned long phys);
+extern void vmemmap_remove_mapping(unsigned long start,
+				   unsigned long page_size);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
new file mode 100644
index 0000000..ad0d6e3
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/slice.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
+#define _ASM_POWERPC_NOHASH_64_SLICE_H
+
+#ifdef CONFIG_PPC_64K_PAGES
+#define get_slice_psize(mm, addr)	MMU_PAGE_64K
+#else /* CONFIG_PPC_64K_PAGES */
+#define get_slice_psize(mm, addr)	MMU_PAGE_4K
+#endif /* !CONFIG_PPC_64K_PAGES */
+#define slice_set_user_psize(mm, psize)	do { BUG(); } while (0)
+
+#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
new file mode 100644
index 0000000..0634f29
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PGALLOC_H
+#define _ASM_POWERPC_NOHASH_PGALLOC_H
+
+#include <linux/mm.h>
+
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#ifdef CONFIG_PPC64
+extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
+#else
+/* 44x etc., which is BookE but not Book3E */
+static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
+				     unsigned long address)
+{
+
+}
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC64
+#include <asm/nohash/64/pgalloc.h>
+#else
+#include <asm/nohash/32/pgalloc.h>
+#endif
+#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
new file mode 100644
index 0000000..b321c82
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_PGTABLE_H
+
+#if defined(CONFIG_PPC64)
+#include <asm/nohash/64/pgtable.h>
+#else
+#include <asm/nohash/32/pgtable.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)
+{
+	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+}
+static inline int pte_read(pte_t pte)		{ return 1; }
+static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return (pte_val(pte) &
+		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * We only find page table entry in the last level
+ * Hence no need for other accessors
+ */
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+	/*
+	 * A read-only access is controlled by the _PAGE_USER bit.
+	 * We have _PAGE_READ set for WRITE and EXECUTE.
+	 */
+	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+		return false;
+
+	if (write && !pte_write(pte))
+		return false;
+
+	return true;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte)	{
+	return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_basic_t ptev;
+
+	ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
+	ptev |= _PAGE_RO;
+	return __pte(ptev);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_basic_t ptev;
+
+	ptev = pte_val(pte) & ~_PAGE_RO;
+	ptev |= _PAGE_RW;
+	return __pte(ptev);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_HUGE);
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Insert a PTE: the top-level function is out of line.  It uses an
+ * inline low level function in the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		       pte_t pte);
+
+/* This low level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors.  It's
+ * a horrible mess that I'm not going to try to clean up now, but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+	/* One case that needs care is 32-bit with 64-bit PTEs.  Here we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between.
+	 * In the percpu case, we also fall back to the simple update.
+	 */
+	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
+		__asm__ __volatile__("\
+			stw%U0%X0 %2,%0\n\
+			eieio\n\
+			stw%U0%X0 %L2,%1"
+		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+		: "r" (pte) : "memory");
+		return;
+	}
+	/* Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
+	 */
+	*ptep = pte;
+
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
+		mb();
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_COHERENT))
+
+#if _PAGE_WRITETHRU != 0
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_COHERENT | _PAGE_WRITETHRU))
+#endif
+
+#define pgprot_cached_noncoherent(prot) \
+		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
+#define pgprot_writecombine pgprot_noncached_wc
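+
+/*
+ * Typical use, sketched with hypothetical driver code: an MMIO range
+ * mapped into userspace wants the cache-inhibited, guarded protection,
+ *
+ *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
+ *			       vma->vm_page_prot);
+ */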
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hugepd_ok(hugepd_t hpd)
+{
+#ifdef CONFIG_PPC_8xx
+	return ((hpd_val(hpd) & 0x4) != 0);
+#else
+	/* We clear the top bit to indicate hugepd */
+	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
+#endif
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+	return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+	return 0;
+}
+#define pgd_huge		pgd_huge
+
+#define is_hugepd(hpd)		(hugepd_ok(hpd))
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
new file mode 100644
index 0000000..12730b8
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant to the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT	0x000001 /* software: pte contains a translation */
+#define _PAGE_SW1	0x000002
+#define _PAGE_BIT_SWAP_TYPE	2
+#define _PAGE_BAP_SR	0x000004
+#define _PAGE_BAP_UR	0x000008
+#define _PAGE_BAP_SW	0x000010
+#define _PAGE_BAP_UW	0x000020
+#define _PAGE_BAP_SX	0x000040
+#define _PAGE_BAP_UX	0x000080
+#define _PAGE_PSIZE_MSK	0x000f00
+#define _PAGE_PSIZE_4K	0x000200
+#define _PAGE_PSIZE_8K	0x000300
+#define _PAGE_PSIZE_16K	0x000400
+#define _PAGE_PSIZE_32K	0x000500
+#define _PAGE_PSIZE_64K	0x000600
+#define _PAGE_PSIZE_128K	0x000700
+#define _PAGE_PSIZE_256K	0x000800
+#define _PAGE_PSIZE_512K	0x000900
+#define _PAGE_PSIZE_1M	0x000a00
+#define _PAGE_PSIZE_2M	0x000b00
+#define _PAGE_PSIZE_4M	0x000c00
+#define _PAGE_PSIZE_8M	0x000d00
+#define _PAGE_PSIZE_16M	0x000e00
+#define _PAGE_PSIZE_32M	0x000f00
+#define _PAGE_DIRTY	0x001000 /* C: page changed */
+#define _PAGE_SW0	0x002000
+#define _PAGE_U3	0x004000
+#define _PAGE_U2	0x008000
+#define _PAGE_U1	0x010000
+#define _PAGE_U0	0x020000
+#define _PAGE_ACCESSED	0x040000
+#define _PAGE_ENDIAN	0x080000
+#define _PAGE_GUARDED	0x100000
+#define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU	0x800000 /* W: cache write-through */
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC		_PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_RW		(_PAGE_BAP_SW | _PAGE_BAP_UW) /* User/supervisor write permission */
+#define _PAGE_KERNEL_RW		(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO		(_PAGE_BAP_SR)
+#define _PAGE_KERNEL_RWX	(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
+#define _PAGE_KERNEL_ROX	(_PAGE_BAP_SR | _PAGE_BAP_SX)
+#define _PAGE_USER		(_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+#define _PAGE_PRIVILEGED	(_PAGE_BAP_SR)
+
+#define _PAGE_SPECIAL	_PAGE_SW0
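+
+/*
+ * For example, _PAGE_KERNEL_RW above expands to 0x001014: supervisor
+ * read (_PAGE_BAP_SR = 0x04) plus supervisor write (_PAGE_BAP_SW =
+ * 0x10) plus the changed bit (_PAGE_DIRTY = 0x1000), with no user BAP
+ * bits set.
+ */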
+
+/* Base page size */
+#ifdef CONFIG_PPC_64K_PAGES
+#define _PAGE_PSIZE	_PAGE_PSIZE_64K
+#define PTE_RPN_SHIFT	(28)
+#else
+#define _PAGE_PSIZE	_PAGE_PSIZE_4K
+#define	PTE_RPN_SHIFT	(24)
+#endif
+
+#define PTE_WIMGE_SHIFT (19)
+#define PTE_BAP_SHIFT	(2)
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK	0xffffffff00000000ULL
+#define _PMD_PRESENT	0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD	(~PAGE_MASK)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
new file mode 100644
index 0000000..b1d8fec
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H
+#define _ASM_POWERPC_NOHASH_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - local_flush_tlb_mm(mm, full) flushes the specified mm context on
+ *                           the local processor
+ *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
+ *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *
+ */
+
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+struct vm_area_struct;
+struct mm_struct;
+
+#define MMU_NO_CONTEXT		((unsigned int)-1)
+
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+				   int tsize, int ind);
+
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+			     int tsize, int ind);
+#else
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
+#define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
new file mode 100644
index 0000000..09a518b
--- /dev/null
+++ b/arch/powerpc/include/asm/nvram.h
@@ -0,0 +1,107 @@
+/*
+ * NVRAM definitions and access functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_NVRAM_H
+#define _ASM_POWERPC_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <uapi/asm/nvram.h>
+
+/*
+ * Set oops header version to distinguish between old and new format header.
+ * lnx,oops-log partition max size is 4000, header version > 4000 will
+ * help in identifying new header.
+ */
+#define OOPS_HDR_VERSION 5000
+
+struct err_log_info {
+	__be32 error_type;
+	__be32 seq_num;
+};
+
+struct nvram_os_partition {
+	const char *name;
+	int req_size;	/* desired size, in bytes */
+	int min_size;	/* minimum acceptable size (0 means req_size) */
+	long size;	/* size of data portion (excluding err_log_info) */
+	long index;	/* offset of data portion of partition */
+	bool os_partition; /* partition initialized by OS, not FW */
+};
+
+struct oops_log_info {
+	__be16 version;
+	__be16 report_length;
+	__be64 timestamp;
+} __attribute__((packed));
+
+extern struct nvram_os_partition oops_log_partition;
+
+#ifdef CONFIG_PPC_PSERIES
+extern struct nvram_os_partition rtas_log_partition;
+
+extern int nvram_write_error_log(char * buff, int length,
+					 unsigned int err_type, unsigned int err_seq);
+extern int nvram_read_error_log(char * buff, int length,
+					 unsigned int * err_type, unsigned int *err_seq);
+extern int nvram_clear_error_log(void);
+extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef CONFIG_MMIO_NVRAM
+extern int mmio_nvram_init(void);
+#else
+static inline int mmio_nvram_init(void)
+{
+	return -ENODEV;
+}
+#endif
+
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig,
+					const char *exceptions[]);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
+/* Return partition offset in nvram */
+extern int	pmac_get_partition(int partition);
+
+/* Direct access to XPRAM on PowerMacs */
+extern u8	pmac_xpram_read(int xpaddr);
+extern void	pmac_xpram_write(int xpaddr, u8 data);
+
+/* Synchronize NVRAM */
+extern void	nvram_sync(void);
+
+/* Initialize NVRAM OS partition */
+extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
+
+/* Initialize NVRAM oops partition */
+extern void __init nvram_init_oops_partition(int rtas_partition_exists);
+
+/* Read a NVRAM partition */
+extern int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+				int length, unsigned int *err_type,
+				unsigned int *error_log_cnt);
+
+/* Write to NVRAM OS partition */
+extern int nvram_write_os_partition(struct nvram_os_partition *part,
+				    char *buff, int length,
+				    unsigned int err_type,
+				    unsigned int error_log_cnt);
+
+/* Determine NVRAM size */
+extern ssize_t nvram_get_size(void);
+
+/* Normal access to NVRAM */
+extern unsigned char nvram_read_byte(int i);
+extern void nvram_write_byte(unsigned char c, int i);
+#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/include/asm/ohare.h b/arch/powerpc/include/asm/ohare.h
new file mode 100644
index 0000000..da3371f
--- /dev/null
+++ b/arch/powerpc/include/asm/ohare.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_OHARE_H
+#define _ASM_POWERPC_OHARE_H
+#ifdef __KERNEL__
+/*
+ * ohare.h: definitions for using the "O'Hare" I/O controller chip.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ *
+ * BenH: Changed to match those of heathrow (but not all of them). Please
+ *       check if I didn't break anything (especially the media bay).
+ */
+
+/* offset from ohare base for feature control register */
+#define OHARE_MBCR	0x34
+#define OHARE_FCR	0x38
+
+/*
+ * Bits in feature control register.
+ * These were mostly derived by experiment on a powerbook 3400
+ * and may differ for other machines.
+ */
+#define OH_SCC_RESET		1
+#define OH_BAY_POWER_N		2	/* a guess */
+#define OH_BAY_PCI_ENABLE	4	/* a guess */
+#define OH_BAY_IDE_ENABLE	8
+#define OH_BAY_FLOPPY_ENABLE	0x10
+#define OH_IDE0_ENABLE		0x20
+#define OH_IDE0_RESET_N		0x40	/* a guess */
+#define OH_BAY_DEV_MASK		0x1c
+#define OH_BAY_RESET_N		0x80
+#define OH_IOBUS_ENABLE		0x100	/* IOBUS seems to be IDE */
+#define OH_SCC_ENABLE		0x200
+#define OH_MESH_ENABLE		0x400
+#define OH_FLOPPY_ENABLE	0x800
+#define OH_SCCA_IO		0x4000
+#define OH_SCCB_IO		0x8000
+#define OH_VIA_ENABLE		0x10000	/* Is apparently wrong, to be verified */
+#define OH_IDE1_RESET_N		0x800000
+
+/*
+ * Bits to set in the feature control register on PowerBooks.
+ */
+#define PBOOK_FEATURES		(OH_IDE0_ENABLE | OH_SCC_ENABLE | \
+				 OH_MESH_ENABLE | OH_SCCA_IO | OH_SCCB_IO)
+
+/*
+ * A magic value to put into the feature control register of the
+ * "ohare" I/O controller on Starmaxes to enable the IDE CD interface.
+ * Contributed by Harry Eaton.
+ */
+#define STARMAX_FEATURES	0xbeff7a
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OHARE_H */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
new file mode 100644
index 0000000..8365353
--- /dev/null
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -0,0 +1,1130 @@
+/*
+ * OPAL API definitions.
+ *
+ * Copyright 2011-2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __OPAL_API_H
+#define __OPAL_API_H
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS		0
+#define OPAL_PARAMETER		-1
+#define OPAL_BUSY		-2
+#define OPAL_PARTIAL		-3
+#define OPAL_CONSTRAINED	-4
+#define OPAL_CLOSED		-5
+#define OPAL_HARDWARE		-6
+#define OPAL_UNSUPPORTED	-7
+#define OPAL_PERMISSION		-8
+#define OPAL_NO_MEM		-9
+#define OPAL_RESOURCE		-10
+#define OPAL_INTERNAL_ERROR	-11
+#define OPAL_BUSY_EVENT		-12
+#define OPAL_HARDWARE_FROZEN	-13
+#define OPAL_WRONG_STATE	-14
+#define OPAL_ASYNC_COMPLETION	-15
+#define OPAL_EMPTY		-16
+#define OPAL_I2C_TIMEOUT	-17
+#define OPAL_I2C_INVALID_CMD	-18
+#define OPAL_I2C_LBUS_PARITY	-19
+#define OPAL_I2C_BKEND_OVERRUN	-20
+#define OPAL_I2C_BKEND_ACCESS	-21
+#define OPAL_I2C_ARBT_LOST	-22
+#define OPAL_I2C_NACK_RCVD	-23
+#define OPAL_I2C_STOP_ERR	-24
+#define OPAL_XIVE_PROVISIONING	-31
+#define OPAL_XIVE_FREE_ACTIVE	-32
+#define OPAL_TIMEOUT		-33
+
+/* API Tokens (in r0) */
+#define OPAL_INVALID_CALL		       -1
+#define OPAL_TEST				0
+#define OPAL_CONSOLE_WRITE			1
+#define OPAL_CONSOLE_READ			2
+#define OPAL_RTC_READ				3
+#define OPAL_RTC_WRITE				4
+#define OPAL_CEC_POWER_DOWN			5
+#define OPAL_CEC_REBOOT				6
+#define OPAL_READ_NVRAM				7
+#define OPAL_WRITE_NVRAM			8
+#define OPAL_HANDLE_INTERRUPT			9
+#define OPAL_POLL_EVENTS			10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY		11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY		12
+#define OPAL_PCI_CONFIG_READ_BYTE		13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD  	14
+#define OPAL_PCI_CONFIG_READ_WORD		15
+#define OPAL_PCI_CONFIG_WRITE_BYTE		16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD		17
+#define OPAL_PCI_CONFIG_WRITE_WORD		18
+#define OPAL_SET_XIVE				19
+#define OPAL_GET_XIVE				20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS	21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER	22
+#define OPAL_PCI_EEH_FREEZE_STATUS		23
+#define OPAL_PCI_SHPC				24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE		25
+#define OPAL_PCI_EEH_FREEZE_CLEAR		26
+#define OPAL_PCI_PHB_MMIO_ENABLE		27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW		28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW		29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY		30
+#define OPAL_PCI_SET_PE				31
+#define OPAL_PCI_SET_PELTV			32
+#define OPAL_PCI_SET_MVE			33
+#define OPAL_PCI_SET_MVE_ENABLE			34
+#define OPAL_PCI_GET_XIVE_REISSUE		35
+#define OPAL_PCI_SET_XIVE_REISSUE		36
+#define OPAL_PCI_SET_XIVE_PE			37
+#define OPAL_GET_XIVE_SOURCE			38
+#define OPAL_GET_MSI_32				39
+#define OPAL_GET_MSI_64				40
+#define OPAL_START_CPU				41
+#define OPAL_QUERY_CPU_STATUS			42
+#define OPAL_WRITE_OPPANEL			43 /* unimplemented */
+#define OPAL_PCI_MAP_PE_DMA_WINDOW		44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL		45
+#define OPAL_PCI_RESET				49
+#define OPAL_PCI_GET_HUB_DIAG_DATA		50
+#define OPAL_PCI_GET_PHB_DIAG_DATA		51
+#define OPAL_PCI_FENCE_PHB			52
+#define OPAL_PCI_REINIT				53
+#define OPAL_PCI_MASK_PE_ERROR			54
+#define OPAL_SET_SLOT_LED_STATUS		55
+#define OPAL_GET_EPOW_STATUS			56
+#define OPAL_SET_SYSTEM_ATTENTION_LED		57
+#define OPAL_RESERVED1				58
+#define OPAL_RESERVED2				59
+#define OPAL_PCI_NEXT_ERROR			60
+#define OPAL_PCI_EEH_FREEZE_STATUS2		61
+#define OPAL_PCI_POLL				62
+#define OPAL_PCI_MSI_EOI			63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2		64
+#define OPAL_XSCOM_READ				65
+#define OPAL_XSCOM_WRITE			66
+#define OPAL_LPC_READ				67
+#define OPAL_LPC_WRITE				68
+#define OPAL_RETURN_CPU				69
+#define OPAL_REINIT_CPUS			70
+#define OPAL_ELOG_READ				71
+#define OPAL_ELOG_WRITE				72
+#define OPAL_ELOG_ACK				73
+#define OPAL_ELOG_RESEND			74
+#define OPAL_ELOG_SIZE				75
+#define OPAL_FLASH_VALIDATE			76
+#define OPAL_FLASH_MANAGE			77
+#define OPAL_FLASH_UPDATE			78
+#define OPAL_RESYNC_TIMEBASE			79
+#define OPAL_CHECK_TOKEN			80
+#define OPAL_DUMP_INIT				81
+#define OPAL_DUMP_INFO				82
+#define OPAL_DUMP_READ				83
+#define OPAL_DUMP_ACK				84
+#define OPAL_GET_MSG				85
+#define OPAL_CHECK_ASYNC_COMPLETION		86
+#define OPAL_SYNC_HOST_REBOOT			87
+#define OPAL_SENSOR_READ			88
+#define OPAL_GET_PARAM				89
+#define OPAL_SET_PARAM				90
+#define OPAL_DUMP_RESEND			91
+#define OPAL_ELOG_SEND				92	/* Deprecated */
+#define OPAL_PCI_SET_PHB_CAPI_MODE		93
+#define OPAL_DUMP_INFO2				94
+#define OPAL_WRITE_OPPANEL_ASYNC		95
+#define OPAL_PCI_ERR_INJECT			96
+#define OPAL_PCI_EEH_FREEZE_SET			97
+#define OPAL_HANDLE_HMI				98
+#define OPAL_CONFIG_CPU_IDLE_STATE		99
+#define OPAL_SLW_SET_REG			100
+#define OPAL_REGISTER_DUMP_REGION		101
+#define OPAL_UNREGISTER_DUMP_REGION		102
+#define OPAL_WRITE_TPO				103
+#define OPAL_READ_TPO				104
+#define OPAL_GET_DPO_STATUS			105
+#define OPAL_OLD_I2C_REQUEST			106	/* Deprecated */
+#define OPAL_IPMI_SEND				107
+#define OPAL_IPMI_RECV				108
+#define OPAL_I2C_REQUEST			109
+#define OPAL_FLASH_READ				110
+#define OPAL_FLASH_WRITE			111
+#define OPAL_FLASH_ERASE			112
+#define OPAL_PRD_MSG				113
+#define OPAL_LEDS_GET_INDICATOR			114
+#define OPAL_LEDS_SET_INDICATOR			115
+#define OPAL_CEC_REBOOT2			116
+#define OPAL_CONSOLE_FLUSH			117
+#define OPAL_GET_DEVICE_TREE			118
+#define OPAL_PCI_GET_PRESENCE_STATE		119
+#define OPAL_PCI_GET_POWER_STATE		120
+#define OPAL_PCI_SET_POWER_STATE		121
+#define OPAL_INT_GET_XIRR			122
+#define	OPAL_INT_SET_CPPR			123
+#define OPAL_INT_EOI				124
+#define OPAL_INT_SET_MFRR			125
+#define OPAL_PCI_TCE_KILL			126
+#define OPAL_NMMU_SET_PTCR			127
+#define OPAL_XIVE_RESET				128
+#define OPAL_XIVE_GET_IRQ_INFO			129
+#define OPAL_XIVE_GET_IRQ_CONFIG		130
+#define OPAL_XIVE_SET_IRQ_CONFIG		131
+#define OPAL_XIVE_GET_QUEUE_INFO		132
+#define OPAL_XIVE_SET_QUEUE_INFO		133
+#define OPAL_XIVE_DONATE_PAGE			134
+#define OPAL_XIVE_ALLOCATE_VP_BLOCK		135
+#define OPAL_XIVE_FREE_VP_BLOCK			136
+#define OPAL_XIVE_GET_VP_INFO			137
+#define OPAL_XIVE_SET_VP_INFO			138
+#define OPAL_XIVE_ALLOCATE_IRQ			139
+#define OPAL_XIVE_FREE_IRQ			140
+#define OPAL_XIVE_SYNC				141
+#define OPAL_XIVE_DUMP				142
+#define OPAL_XIVE_RESERVED3			143
+#define OPAL_XIVE_RESERVED4			144
+#define OPAL_SIGNAL_SYSTEM_RESET		145
+#define OPAL_NPU_INIT_CONTEXT			146
+#define OPAL_NPU_DESTROY_CONTEXT		147
+#define OPAL_NPU_MAP_LPAR			148
+#define OPAL_IMC_COUNTERS_INIT			149
+#define OPAL_IMC_COUNTERS_START			150
+#define OPAL_IMC_COUNTERS_STOP			151
+#define OPAL_GET_POWERCAP			152
+#define OPAL_SET_POWERCAP			153
+#define OPAL_GET_POWER_SHIFT_RATIO		154
+#define OPAL_SET_POWER_SHIFT_RATIO		155
+#define OPAL_SENSOR_GROUP_CLEAR			156
+#define OPAL_PCI_SET_P2P			157
+#define OPAL_QUIESCE				158
+#define OPAL_NPU_SPA_SETUP			159
+#define OPAL_NPU_SPA_CLEAR_CACHE		160
+#define OPAL_NPU_TL_SET				161
+#define OPAL_SENSOR_READ_U64			162
+#define OPAL_SENSOR_GROUP_ENABLE		163
+#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR		164
+#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR		165
+#define	OPAL_NX_COPROC_INIT			167
+#define OPAL_LAST				167
+
+#define QUIESCE_HOLD			1 /* Spin all calls at entry */
+#define QUIESCE_REJECT			2 /* Fail all calls with OPAL_BUSY */
+#define QUIESCE_LOCK_BREAK		3 /* Set to ignore locks. */
+#define QUIESCE_RESUME			4 /* Un-quiesce */
+#define QUIESCE_RESUME_FAST_REBOOT	5 /* Un-quiesce, fast reboot */
+
+/* Device tree flags */
+
+/*
+ * Flags set in power-mgmt nodes in device tree describing
+ * idle states that are supported in the platform.
+ */
+
+#define OPAL_PM_TIMEBASE_STOP		0x00000002
+#define OPAL_PM_LOSE_HYP_CONTEXT	0x00002000
+#define OPAL_PM_LOSE_FULL_CONTEXT	0x00004000
+#define OPAL_PM_NAP_ENABLED		0x00010000
+#define OPAL_PM_SLEEP_ENABLED		0x00020000
+#define OPAL_PM_WINKLE_ENABLED		0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1	0x00080000 /* with workaround */
+#define OPAL_PM_STOP_INST_FAST		0x00100000
+#define OPAL_PM_STOP_INST_DEEP		0x00200000
+
+/*
+ * OPAL_CONFIG_CPU_IDLE_STATE parameters
+ */
+#define OPAL_CONFIG_IDLE_FASTSLEEP	1
+#define OPAL_CONFIG_IDLE_UNDO		0
+#define OPAL_CONFIG_IDLE_APPLY		1
+
+#ifndef __ASSEMBLY__
+
+/* Other enums */
+enum OpalFreezeState {
+	OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+	OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+	OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+	OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+	OPAL_EEH_STOPPED_RESET = 4,
+	OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+	OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+
+enum OpalEehFreezeActionToken {
+	OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+	OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+	OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
+
+	OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
+	OPAL_EEH_ACTION_SET_FREEZE_DMA  = 2,
+	OPAL_EEH_ACTION_SET_FREEZE_ALL  = 3
+};
+
+enum OpalPciStatusToken {
+	OPAL_EEH_NO_ERROR	= 0,
+	OPAL_EEH_IOC_ERROR	= 1,
+	OPAL_EEH_PHB_ERROR	= 2,
+	OPAL_EEH_PE_ERROR	= 3,
+	OPAL_EEH_PE_MMIO_ERROR	= 4,
+	OPAL_EEH_PE_DMA_ERROR	= 5
+};
+
+enum OpalPciErrorSeverity {
+	OPAL_EEH_SEV_NO_ERROR	= 0,
+	OPAL_EEH_SEV_IOC_DEAD	= 1,
+	OPAL_EEH_SEV_PHB_DEAD	= 2,
+	OPAL_EEH_SEV_PHB_FENCED	= 3,
+	OPAL_EEH_SEV_PE_ER	= 4,
+	OPAL_EEH_SEV_INF	= 5
+};
+
+enum OpalErrinjectType {
+	OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR	= 0,
+	OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64	= 1,
+};
+
+enum OpalErrinjectFunc {
+	/* IOA bus specific errors */
+	OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR	= 0,
+	OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA	= 1,
+	OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR	= 2,
+	OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA	= 3,
+	OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR	= 4,
+	OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA	= 5,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR	= 6,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA	= 7,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR	= 8,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA	= 9,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR	= 10,
+	OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA	= 11,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR	= 12,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA	= 13,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER	= 14,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET	= 15,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR	= 16,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA	= 17,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER	= 18,
+	OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET	= 19,
+};
+
+enum OpalMmioWindowType {
+	OPAL_M32_WINDOW_TYPE = 1,
+	OPAL_M64_WINDOW_TYPE = 2,
+	OPAL_IO_WINDOW_TYPE  = 3
+};
+
+enum OpalExceptionHandler {
+	OPAL_MACHINE_CHECK_HANDLER	    = 1,
+	OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+	OPAL_SOFTPATCH_HANDLER		    = 3
+};
+
+enum OpalPendingState {
+	OPAL_EVENT_OPAL_INTERNAL   = 0x1,
+	OPAL_EVENT_NVRAM	   = 0x2,
+	OPAL_EVENT_RTC		   = 0x4,
+	OPAL_EVENT_CONSOLE_OUTPUT  = 0x8,
+	OPAL_EVENT_CONSOLE_INPUT   = 0x10,
+	OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+	OPAL_EVENT_ERROR_LOG	   = 0x40,
+	OPAL_EVENT_EPOW		   = 0x80,
+	OPAL_EVENT_LED_STATUS	   = 0x100,
+	OPAL_EVENT_PCI_ERROR	   = 0x200,
+	OPAL_EVENT_DUMP_AVAIL	   = 0x400,
+	OPAL_EVENT_MSG_PENDING	   = 0x800,
+};
+
+enum OpalThreadStatus {
+	OPAL_THREAD_INACTIVE = 0x0,
+	OPAL_THREAD_STARTED = 0x1,
+	OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
+};
+
+enum OpalPciBusCompare {
+	OpalPciBusAny	= 0,	/* Any bus number match */
+	OpalPciBus3Bits	= 2,	/* Match top 3 bits of bus number */
+	OpalPciBus4Bits	= 3,	/* Match top 4 bits of bus number */
+	OpalPciBus5Bits	= 4,	/* Match top 5 bits of bus number */
+	OpalPciBus6Bits	= 5,	/* Match top 6 bits of bus number */
+	OpalPciBus7Bits	= 6,	/* Match top 7 bits of bus number */
+	OpalPciBusAll	= 7,	/* Match bus number exactly */
+};
+
+enum OpalDeviceCompare {
+	OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+	OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+	OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+	OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+	OPAL_UNMAP_PE = 0,
+	OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+	OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+	OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+	OPAL_DISABLE_MVE = 0,
+	OPAL_ENABLE_MVE = 1
+};
+
+enum OpalM64Action {
+	OPAL_DISABLE_M64 = 0,
+	OPAL_ENABLE_M64_SPLIT = 1,
+	OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
+enum OpalPciResetScope {
+	OPAL_RESET_PHB_COMPLETE		= 1,
+	OPAL_RESET_PCI_LINK		= 2,
+	OPAL_RESET_PHB_ERROR		= 3,
+	OPAL_RESET_PCI_HOT		= 4,
+	OPAL_RESET_PCI_FUNDAMENTAL	= 5,
+	OPAL_RESET_PCI_IODA_TABLE	= 6
+};
+
+enum OpalPciReinitScope {
+	/*
+	 * Note: we chose values that do not overlap
+	 * OpalPciResetScope as OPAL v2 used the same
+	 * enum for both
+	 */
+	OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+	OPAL_DEASSERT_RESET = 0,
+	OPAL_ASSERT_RESET   = 1
+};
+
+enum OpalPciSlotPresence {
+	OPAL_PCI_SLOT_EMPTY	= 0,
+	OPAL_PCI_SLOT_PRESENT	= 1
+};
+
+enum OpalPciSlotPower {
+	OPAL_PCI_SLOT_POWER_OFF	= 0,
+	OPAL_PCI_SLOT_POWER_ON	= 1,
+	OPAL_PCI_SLOT_OFFLINE	= 2,
+	OPAL_PCI_SLOT_ONLINE	= 3
+};
+
+enum OpalSlotLedType {
+	OPAL_SLOT_LED_TYPE_ID = 0,	/* IDENTIFY LED */
+	OPAL_SLOT_LED_TYPE_FAULT = 1,	/* FAULT LED */
+	OPAL_SLOT_LED_TYPE_ATTN = 2,	/* System Attention LED */
+	OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+	OPAL_SLOT_LED_STATE_OFF = 0,	/* LED is OFF */
+	OPAL_SLOT_LED_STATE_ON = 1	/* LED is ON */
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+	OPAL_LPC_MEM	= 0,
+	OPAL_LPC_IO	= 1,
+	OPAL_LPC_FW	= 2,
+};
+
+enum opal_msg_type {
+	OPAL_MSG_ASYNC_COMP	= 0,	/* params[0] = token, params[1] = rc,
+					 * additional params function-specific
+					 */
+	OPAL_MSG_MEM_ERR	= 1,
+	OPAL_MSG_EPOW		= 2,
+	OPAL_MSG_SHUTDOWN	= 3,	/* params[0] = 1 reboot, 0 shutdown */
+	OPAL_MSG_HMI_EVT	= 4,
+	OPAL_MSG_DPO		= 5,
+	OPAL_MSG_PRD		= 6,
+	OPAL_MSG_OCC		= 7,
+	OPAL_MSG_TYPE_MAX,
+};
+
+struct opal_msg {
+	__be32 msg_type;
+	__be32 reserved;
+	__be64 params[8];
+};
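+
+/*
+ * Illustrative sketch, not part of the original header: opal_msg
+ * fields are big-endian, so consumers decode them before use.  This
+ * helper is hypothetical and assumes the <asm/byteorder.h> accessors
+ * are visible.
+ */
+static inline uint32_t opal_msg_get_type(const struct opal_msg *msg)
+{
+	return be32_to_cpu(msg->msg_type);
+}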
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+	OPAL_SYSPARAM_READ  = 0x1,
+	OPAL_SYSPARAM_WRITE = 0x2,
+	OPAL_SYSPARAM_RW    = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+enum {
+	OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
+};
+
+struct opal_ipmi_msg {
+	uint8_t version;
+	uint8_t netfn;
+	uint8_t cmd;
+	uint8_t data[];
+};
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+	OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+	OPAL_MEM_ERR_TYPE_RESILIENCE	= 0,
+	OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+	OPAL_MEM_RESILIENCE_CE		= 0,
+	OPAL_MEM_RESILIENCE_UE,
+	OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+	OPAL_MEM_DYNAMIC_DEALLOC	= 0,
+};
+
+struct OpalMemoryErrorData {
+	enum OpalMemErr_Version	version:8;	/* 0x00 */
+	enum OpalMemErrType	type:8;		/* 0x01 */
+	__be16			flags;		/* 0x02 */
+	uint8_t			reserved_1[4];	/* 0x04 */
+
+	union {
+		/* Memory Resilience corrected/uncorrected error info */
+		struct {
+			enum OpalMemErr_ResilErrType	resil_err_type:8;
+			uint8_t				reserved_1[7];
+			__be64				physical_address_start;
+			__be64				physical_address_end;
+		} resilience;
+		/* Dynamic memory deallocation error info */
+		struct {
+			enum OpalMemErr_DynErrType	dyn_err_type:8;
+			uint8_t				reserved_1[7];
+			__be64				physical_address_start;
+			__be64				physical_address_end;
+		} dyn_dealloc;
+	} u;
+};
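+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original
+ * header): extracting the affected physical range from a
+ * resilience-type memory error, assuming be64_to_cpu() is visible.
+ */
+static inline int opal_mem_err_range(const struct OpalMemoryErrorData *evt,
+				     uint64_t *start, uint64_t *end)
+{
+	if (evt->type != OPAL_MEM_ERR_TYPE_RESILIENCE)
+		return 0;
+	*start = be64_to_cpu(evt->u.resilience.physical_address_start);
+	*end = be64_to_cpu(evt->u.resilience.physical_address_end);
+	return 1;
+}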
+
+/* HMI interrupt event */
+enum OpalHMI_Version {
+	OpalHMIEvt_V1 = 1,
+	OpalHMIEvt_V2 = 2,
+};
+
+enum OpalHMI_Severity {
+	OpalHMI_SEV_NO_ERROR = 0,
+	OpalHMI_SEV_WARNING = 1,
+	OpalHMI_SEV_ERROR_SYNC = 2,
+	OpalHMI_SEV_FATAL = 3,
+};
+
+enum OpalHMI_Disposition {
+	OpalHMI_DISPOSITION_RECOVERED = 0,
+	OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalHMI_ErrType {
+	OpalHMI_ERROR_MALFUNC_ALERT	= 0,
+	OpalHMI_ERROR_PROC_RECOV_DONE,
+	OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
+	OpalHMI_ERROR_PROC_RECOV_MASKED,
+	OpalHMI_ERROR_TFAC,
+	OpalHMI_ERROR_TFMR_PARITY,
+	OpalHMI_ERROR_HA_OVERFLOW_WARN,
+	OpalHMI_ERROR_XSCOM_FAIL,
+	OpalHMI_ERROR_XSCOM_DONE,
+	OpalHMI_ERROR_SCOM_FIR,
+	OpalHMI_ERROR_DEBUG_TRIG_FIR,
+	OpalHMI_ERROR_HYP_RESOURCE,
+	OpalHMI_ERROR_CAPP_RECOVERY,
+};
+
+enum OpalHMI_XstopType {
+	CHECKSTOP_TYPE_UNKNOWN	=	0,
+	CHECKSTOP_TYPE_CORE	=	1,
+	CHECKSTOP_TYPE_NX	=	2,
+};
+
+enum OpalHMI_CoreXstopReason {
+	CORE_CHECKSTOP_IFU_REGFILE		= 0x00000001,
+	CORE_CHECKSTOP_IFU_LOGIC		= 0x00000002,
+	CORE_CHECKSTOP_PC_DURING_RECOV		= 0x00000004,
+	CORE_CHECKSTOP_ISU_REGFILE		= 0x00000008,
+	CORE_CHECKSTOP_ISU_LOGIC		= 0x00000010,
+	CORE_CHECKSTOP_FXU_LOGIC		= 0x00000020,
+	CORE_CHECKSTOP_VSU_LOGIC		= 0x00000040,
+	CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE	= 0x00000080,
+	CORE_CHECKSTOP_LSU_REGFILE		= 0x00000100,
+	CORE_CHECKSTOP_PC_FWD_PROGRESS		= 0x00000200,
+	CORE_CHECKSTOP_LSU_LOGIC		= 0x00000400,
+	CORE_CHECKSTOP_PC_LOGIC			= 0x00000800,
+	CORE_CHECKSTOP_PC_HYP_RESOURCE		= 0x00001000,
+	CORE_CHECKSTOP_PC_HANG_RECOV_FAILED	= 0x00002000,
+	CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED	= 0x00004000,
+	CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ	= 0x00008000,
+	CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ	= 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+	NX_CHECKSTOP_SHM_INVAL_STATE_ERR	= 0x00000001,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1	= 0x00000002,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2	= 0x00000004,
+	NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR	= 0x00000008,
+	NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR	= 0x00000010,
+	NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR	= 0x00000020,
+	NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR	= 0x00000040,
+	NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR	= 0x00000080,
+	NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR	= 0x00000100,
+	NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR	= 0x00000200,
+	NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR	= 0x00000400,
+	NX_CHECKSTOP_DMA_CRB_UE			= 0x00000800,
+	NX_CHECKSTOP_DMA_CRB_SUE		= 0x00001000,
+	NX_CHECKSTOP_PBI_ISN_UE			= 0x00002000,
+};
+
+struct OpalHMIEvent {
+	uint8_t		version;	/* 0x00 */
+	uint8_t		severity;	/* 0x01 */
+	uint8_t		type;		/* 0x02 */
+	uint8_t		disposition;	/* 0x03 */
+	uint8_t		reserved_1[4];	/* 0x04 */
+
+	__be64		hmer;
+	/* TFMR register. Valid only for TFAC and TFMR_PARITY error types. */
+	__be64		tfmr;
+
+	/* version 2 and later */
+	union {
+		/*
+		 * checkstop info (Core/NX).
+		 * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+		 */
+		struct {
+			uint8_t	xstop_type;	/* enum OpalHMI_XstopType */
+			uint8_t reserved_1[3];
+			__be32  xstop_reason;
+			union {
+				__be32 pir;	/* for CHECKSTOP_TYPE_CORE */
+				__be32 chip_id;	/* for CHECKSTOP_TYPE_NX */
+			} u;
+		} xstop_error;
+	} u;
+};
+
+enum {
+	OPAL_P7IOC_DIAG_TYPE_NONE	= 0,
+	OPAL_P7IOC_DIAG_TYPE_RGC	= 1,
+	OPAL_P7IOC_DIAG_TYPE_BI		= 2,
+	OPAL_P7IOC_DIAG_TYPE_CI		= 3,
+	OPAL_P7IOC_DIAG_TYPE_MISC	= 4,
+	OPAL_P7IOC_DIAG_TYPE_I2C	= 5,
+	OPAL_P7IOC_DIAG_TYPE_LAST	= 6
+};
+
+struct OpalIoP7IOCErrorData {
+	__be16 type;
+
+	/* GEM */
+	__be64 gemXfir;
+	__be64 gemRfir;
+	__be64 gemRirqfir;
+	__be64 gemMask;
+	__be64 gemRwof;
+
+	/* LEM */
+	__be64 lemFir;
+	__be64 lemErrMask;
+	__be64 lemAction0;
+	__be64 lemAction1;
+	__be64 lemWof;
+
+	union {
+		struct OpalIoP7IOCRgcErrorData {
+			__be64 rgcStatus;	/* 3E1C10 */
+			__be64 rgcLdcp;		/* 3E1C18 */
+		} rgc;
+		struct OpalIoP7IOCBiErrorData {
+			__be64 biLdcp0;		/* 3C0100, 3C0118 */
+			__be64 biLdcp1;		/* 3C0108, 3C0120 */
+			__be64 biLdcp2;		/* 3C0110, 3C0128 */
+			__be64 biFenceStatus;	/* 3C0130, 3C0130 */
+
+			uint8_t biDownbound;	/* BI Downbound or Upbound */
+		} bi;
+		struct OpalIoP7IOCCiErrorData {
+			__be64 ciPortStatus;	/* 3Dn008 */
+			__be64 ciPortLdcp;	/* 3Dn010 */
+
+			uint8_t ciPort;		/* Index of CI port: 0/1 */
+		} ci;
+	};
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+	OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+	OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+	OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2,
+	OPAL_PHB_ERROR_DATA_TYPE_PHB4 = 3
+};
+
+enum {
+	OPAL_P7IOC_NUM_PEST_REGS = 128,
+	OPAL_PHB3_NUM_PEST_REGS = 256,
+	OPAL_PHB4_NUM_PEST_REGS = 512
+};
+
+struct OpalIoPhbErrorCommon {
+	__be32 version;
+	__be32 ioType;
+	__be32 len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+	struct OpalIoPhbErrorCommon common;
+
+	__be32 brdgCtl;
+
+	// P7IOC utl regs
+	__be32 portStatusReg;
+	__be32 rootCmplxStatus;
+	__be32 busAgentStatus;
+
+	// P7IOC cfg regs
+	__be32 deviceStatus;
+	__be32 slotStatus;
+	__be32 linkStatus;
+	__be32 devCmdStatus;
+	__be32 devSecStatus;
+
+	// cfg AER regs
+	__be32 rootErrorStatus;
+	__be32 uncorrErrorStatus;
+	__be32 corrErrorStatus;
+	__be32 tlpHdr1;
+	__be32 tlpHdr2;
+	__be32 tlpHdr3;
+	__be32 tlpHdr4;
+	__be32 sourceId;
+
+	__be32 rsv3;
+
+	// Record data about the call to allocate a buffer.
+	__be64 errorClass;
+	__be64 correlator;
+
+	//P7IOC MMIO Error Regs
+	__be64 p7iocPlssr;                // n120
+	__be64 p7iocCsr;                  // n110
+	__be64 lemFir;                    // nC00
+	__be64 lemErrorMask;              // nC18
+	__be64 lemWOF;                    // nC40
+	__be64 phbErrorStatus;            // nC80
+	__be64 phbFirstErrorStatus;       // nC88
+	__be64 phbErrorLog0;              // nCC0
+	__be64 phbErrorLog1;              // nCC8
+	__be64 mmioErrorStatus;           // nD00
+	__be64 mmioFirstErrorStatus;      // nD08
+	__be64 mmioErrorLog0;             // nD40
+	__be64 mmioErrorLog1;             // nD48
+	__be64 dma0ErrorStatus;           // nD80
+	__be64 dma0FirstErrorStatus;      // nD88
+	__be64 dma0ErrorLog0;             // nDC0
+	__be64 dma0ErrorLog1;             // nDC8
+	__be64 dma1ErrorStatus;           // nE00
+	__be64 dma1FirstErrorStatus;      // nE08
+	__be64 dma1ErrorLog0;             // nE40
+	__be64 dma1ErrorLog1;             // nE48
+	__be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
+	__be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+	struct OpalIoPhbErrorCommon common;
+
+	__be32 brdgCtl;
+
+	/* PHB3 UTL regs */
+	__be32 portStatusReg;
+	__be32 rootCmplxStatus;
+	__be32 busAgentStatus;
+
+	/* PHB3 cfg regs */
+	__be32 deviceStatus;
+	__be32 slotStatus;
+	__be32 linkStatus;
+	__be32 devCmdStatus;
+	__be32 devSecStatus;
+
+	/* cfg AER regs */
+	__be32 rootErrorStatus;
+	__be32 uncorrErrorStatus;
+	__be32 corrErrorStatus;
+	__be32 tlpHdr1;
+	__be32 tlpHdr2;
+	__be32 tlpHdr3;
+	__be32 tlpHdr4;
+	__be32 sourceId;
+
+	__be32 rsv3;
+
+	/* Record data about the call to allocate a buffer */
+	__be64 errorClass;
+	__be64 correlator;
+
+	/* PHB3 MMIO Error Regs */
+	__be64 nFir;			/* 000 */
+	__be64 nFirMask;		/* 003 */
+	__be64 nFirWOF;		/* 008 */
+	__be64 phbPlssr;		/* 120 */
+	__be64 phbCsr;		/* 110 */
+	__be64 lemFir;		/* C00 */
+	__be64 lemErrorMask;		/* C18 */
+	__be64 lemWOF;		/* C40 */
+	__be64 phbErrorStatus;	/* C80 */
+	__be64 phbFirstErrorStatus;	/* C88 */
+	__be64 phbErrorLog0;		/* CC0 */
+	__be64 phbErrorLog1;		/* CC8 */
+	__be64 mmioErrorStatus;	/* D00 */
+	__be64 mmioFirstErrorStatus;	/* D08 */
+	__be64 mmioErrorLog0;		/* D40 */
+	__be64 mmioErrorLog1;		/* D48 */
+	__be64 dma0ErrorStatus;	/* D80 */
+	__be64 dma0FirstErrorStatus;	/* D88 */
+	__be64 dma0ErrorLog0;		/* DC0 */
+	__be64 dma0ErrorLog1;		/* DC8 */
+	__be64 dma1ErrorStatus;	/* E00 */
+	__be64 dma1FirstErrorStatus;	/* E08 */
+	__be64 dma1ErrorLog0;		/* E40 */
+	__be64 dma1ErrorLog1;		/* E48 */
+	__be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+	__be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb4ErrorData {
+	struct OpalIoPhbErrorCommon common;
+
+	__be32 brdgCtl;
+
+	/* PHB4 cfg regs */
+	__be32 deviceStatus;
+	__be32 slotStatus;
+	__be32 linkStatus;
+	__be32 devCmdStatus;
+	__be32 devSecStatus;
+
+	/* cfg AER regs */
+	__be32 rootErrorStatus;
+	__be32 uncorrErrorStatus;
+	__be32 corrErrorStatus;
+	__be32 tlpHdr1;
+	__be32 tlpHdr2;
+	__be32 tlpHdr3;
+	__be32 tlpHdr4;
+	__be32 sourceId;
+
+	/* PHB4 ETU Error Regs */
+	__be64 nFir;				/* 000 */
+	__be64 nFirMask;			/* 003 */
+	__be64 nFirWOF;				/* 008 */
+	__be64 phbPlssr;			/* 120 */
+	__be64 phbCsr;				/* 110 */
+	__be64 lemFir;				/* C00 */
+	__be64 lemErrorMask;			/* C18 */
+	__be64 lemWOF;				/* C40 */
+	__be64 phbErrorStatus;			/* C80 */
+	__be64 phbFirstErrorStatus;		/* C88 */
+	__be64 phbErrorLog0;			/* CC0 */
+	__be64 phbErrorLog1;			/* CC8 */
+	__be64 phbTxeErrorStatus;		/* D00 */
+	__be64 phbTxeFirstErrorStatus;		/* D08 */
+	__be64 phbTxeErrorLog0;			/* D40 */
+	__be64 phbTxeErrorLog1;			/* D48 */
+	__be64 phbRxeArbErrorStatus;		/* D80 */
+	__be64 phbRxeArbFirstErrorStatus;	/* D88 */
+	__be64 phbRxeArbErrorLog0;		/* DC0 */
+	__be64 phbRxeArbErrorLog1;		/* DC8 */
+	__be64 phbRxeMrgErrorStatus;		/* E00 */
+	__be64 phbRxeMrgFirstErrorStatus;	/* E08 */
+	__be64 phbRxeMrgErrorLog0;		/* E40 */
+	__be64 phbRxeMrgErrorLog1;		/* E48 */
+	__be64 phbRxeTceErrorStatus;		/* E80 */
+	__be64 phbRxeTceFirstErrorStatus;	/* E88 */
+	__be64 phbRxeTceErrorLog0;		/* EC0 */
+	__be64 phbRxeTceErrorLog1;		/* EC8 */
+
+	/* PHB4 REGB Error Regs */
+	__be64 phbPblErrorStatus;		/* 1900 */
+	__be64 phbPblFirstErrorStatus;		/* 1908 */
+	__be64 phbPblErrorLog0;			/* 1940 */
+	__be64 phbPblErrorLog1;			/* 1948 */
+	__be64 phbPcieDlpErrorLog1;		/* 1AA0 */
+	__be64 phbPcieDlpErrorLog2;		/* 1AA8 */
+	__be64 phbPcieDlpErrorStatus;		/* 1AB0 */
+	__be64 phbRegbErrorStatus;		/* 1C00 */
+	__be64 phbRegbFirstErrorStatus;		/* 1C08 */
+	__be64 phbRegbErrorLog0;		/* 1C40 */
+	__be64 phbRegbErrorLog1;		/* 1C48 */
+
+	__be64 pestA[OPAL_PHB4_NUM_PEST_REGS];
+	__be64 pestB[OPAL_PHB4_NUM_PEST_REGS];
+};
+
+enum {
+	OPAL_REINIT_CPUS_HILE_BE	= (1 << 0),
+	OPAL_REINIT_CPUS_HILE_LE	= (1 << 1),
+
+	/* These two define the base MMU mode of the host on P9
+	 *
+	 * On P9 Nimbus DD2.0 and Cumulus (and later), KVM can still
+	 * create hash guests in "radix" mode with care (full core
+	 * switch only).
+	 */
+	OPAL_REINIT_CPUS_MMU_HASH	= (1 << 2),
+	OPAL_REINIT_CPUS_MMU_RADIX	= (1 << 3),
+
+	OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED = (1 << 4),
+};
+
+typedef struct oppanel_line {
+	__be64 line;
+	__be64 line_len;
+} oppanel_line_t;
+
+enum opal_prd_msg_type {
+	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
+	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */
+	OPAL_PRD_MSG_TYPE_ATTN,		/* HBRT <-- OPAL */
+	OPAL_PRD_MSG_TYPE_ATTN_ACK,	/* HBRT --> OPAL */
+	OPAL_PRD_MSG_TYPE_OCC_ERROR,	/* HBRT <-- OPAL */
+	OPAL_PRD_MSG_TYPE_OCC_RESET,	/* HBRT <-- OPAL */
+};
+
+struct opal_prd_msg_header {
+	uint8_t		type;
+	uint8_t		pad[1];
+	__be16		size;
+};
+
+struct opal_prd_msg;
+
+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+	__be64 type;
+	__be64 chip;
+	__be64 throttle_status;
+};
+
+/*
+ * SG entries
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned, *and* every block
+ * size except the last one in the list must be 4k aligned too.
+ */
+struct opal_sg_entry {
+	__be64 data;
+	__be64 length;
+};
+
+/*
+ * Candidate image SG list.
+ *
+ * length = VER | length
+ */
+struct opal_sg_list {
+	__be64 length;
+	__be64 next;
+	struct opal_sg_entry entry[];
+};
+
+/*
+ * Dump region ID range usable by the OS
+ */
+#define OPAL_DUMP_REGION_HOST_START		0x80
+#define OPAL_DUMP_REGION_LOG_BUF		0x80
+#define OPAL_DUMP_REGION_HOST_END		0xFF
+
+/* CAPI modes for PHB */
+enum {
+	OPAL_PHB_CAPI_MODE_PCIE		= 0,
+	OPAL_PHB_CAPI_MODE_CAPI		= 1,
+	OPAL_PHB_CAPI_MODE_SNOOP_OFF    = 2,
+	OPAL_PHB_CAPI_MODE_SNOOP_ON	= 3,
+	OPAL_PHB_CAPI_MODE_DMA		= 4,
+	OPAL_PHB_CAPI_MODE_DMA_TVT1	= 5,
+};
+
+/* OPAL I2C request */
+struct opal_i2c_request {
+	uint8_t	type;
+#define OPAL_I2C_RAW_READ	0
+#define OPAL_I2C_RAW_WRITE	1
+#define OPAL_I2C_SM_READ	2
+#define OPAL_I2C_SM_WRITE	3
+	uint8_t flags;
+#define OPAL_I2C_ADDR_10	0x01	/* Not supported yet */
+	uint8_t	subaddr_sz;		/* Max 4 */
+	uint8_t reserved;
+	__be16 addr;			/* 7 or 10 bit address */
+	__be16 reserved2;
+	__be32 subaddr;		/* Sub-address if any */
+	__be32 size;			/* Data size */
+	__be64 buffer_ra;		/* Buffer real address */
+};
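+
+/*
+ * Illustrative sketch (hypothetical helper): populating a SMBUS-style
+ * read request.  Assumes memset() and the cpu_to_be*() accessors are
+ * visible; buffer_ra must already hold the real address of a
+ * suitable buffer.
+ */
+static inline void opal_i2c_fill_sm_read(struct opal_i2c_request *req,
+					 uint16_t addr, uint32_t subaddr,
+					 uint32_t size, uint64_t buffer_ra)
+{
+	memset(req, 0, sizeof(*req));
+	req->type = OPAL_I2C_SM_READ;
+	req->subaddr_sz = 1;			/* one sub-address byte */
+	req->addr = cpu_to_be16(addr);
+	req->subaddr = cpu_to_be32(subaddr);
+	req->size = cpu_to_be32(size);
+	req->buffer_ra = cpu_to_be64(buffer_ra);
+}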
+
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host passes OPAL a buffer of OPAL_SYSEPOW_MAX elements, each
+ * 16 bits wide, to fetch the system-wide EPOW status. Each element
+ * holds the EPOW status bits for one EPOW sub class as defined
+ * here, so several detailed EPOW status bits for a given sub class
+ * are packed into a single buffer element.
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+	OPAL_SYSEPOW_POWER	= 0,	/* Power EPOW */
+	OPAL_SYSEPOW_TEMP	= 1,	/* Temperature EPOW */
+	OPAL_SYSEPOW_COOLING	= 2,	/* Cooling EPOW */
+	OPAL_SYSEPOW_MAX	= 3,	/* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+	OPAL_SYSPOWER_UPS	= 0x0001, /* System on UPS power */
+	OPAL_SYSPOWER_CHNG	= 0x0002, /* System power config change */
+	OPAL_SYSPOWER_FAIL	= 0x0004, /* System impending power failure */
+	OPAL_SYSPOWER_INCL	= 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+	OPAL_SYSTEMP_AMB	= 0x0001, /* System over ambient temperature */
+	OPAL_SYSTEMP_INT	= 0x0002, /* System over internal temperature */
+	OPAL_SYSTEMP_HMD	= 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+	OPAL_SYSCOOL_INSF	= 0x0001, /* System insufficient cooling */
+};
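+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original
+ * header): testing one sub class in the EPOW status buffer described
+ * above, assuming the caller has already byte-swapped the elements
+ * to CPU endianness.
+ */
+static inline int opal_epow_over_ambient_temp(const uint16_t *epow,
+					      int n_classes)
+{
+	if (n_classes <= OPAL_SYSEPOW_TEMP)
+		return 0;
+	return epow[OPAL_SYSEPOW_TEMP] & OPAL_SYSTEMP_AMB;
+}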
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+	OPAL_REBOOT_NORMAL		= 0,
+	OPAL_REBOOT_PLATFORM_ERROR	= 1,
+};
+
+/* Argument to OPAL_PCI_TCE_KILL */
+enum {
+	OPAL_PCI_TCE_KILL_PAGES,
+	OPAL_PCI_TCE_KILL_PE,
+	OPAL_PCI_TCE_KILL_ALL,
+};
+
+/* The xive operation mode indicates the active "API" and
+ * corresponds to the "mode" parameter of the opal_xive_reset()
+ * call
+ */
+enum {
+	OPAL_XIVE_MODE_EMU	= 0,
+	OPAL_XIVE_MODE_EXPL	= 1,
+};
+
+/* Flags for OPAL_XIVE_GET_IRQ_INFO */
+enum {
+	OPAL_XIVE_IRQ_TRIGGER_PAGE	= 0x00000001,
+	OPAL_XIVE_IRQ_STORE_EOI		= 0x00000002,
+	OPAL_XIVE_IRQ_LSI		= 0x00000004,
+	OPAL_XIVE_IRQ_SHIFT_BUG		= 0x00000008,
+	OPAL_XIVE_IRQ_MASK_VIA_FW	= 0x00000010,
+	OPAL_XIVE_IRQ_EOI_VIA_FW	= 0x00000020,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
+enum {
+	OPAL_XIVE_EQ_ENABLED		= 0x00000001,
+	OPAL_XIVE_EQ_ALWAYS_NOTIFY	= 0x00000002,
+	OPAL_XIVE_EQ_ESCALATE		= 0x00000004,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_VP_INFO */
+enum {
+	OPAL_XIVE_VP_ENABLED		= 0x00000001,
+	OPAL_XIVE_VP_SINGLE_ESCALATION	= 0x00000002,
+};
+
+/* "Any chip" replacement for chip ID for allocation functions */
+enum {
+	OPAL_XIVE_ANY_CHIP		= 0xffffffff,
+};
+
+/* Xive sync options */
+enum {
+	/* These bits are cumulative; the argument is a girq */
+	XIVE_SYNC_EAS			= 0x00000001, /* Sync irq source */
+	XIVE_SYNC_QUEUE			= 0x00000002, /* Sync irq target */
+};
+
+/* Dump options */
+enum {
+	XIVE_DUMP_TM_HYP	= 0,
+	XIVE_DUMP_TM_POOL	= 1,
+	XIVE_DUMP_TM_OS		= 2,
+	XIVE_DUMP_TM_USER	= 3,
+	XIVE_DUMP_VP		= 4,
+	XIVE_DUMP_EMU_STATE	= 5,
+};
+
+/* "type" argument options for OPAL_IMC_COUNTERS_* calls */
+enum {
+	OPAL_IMC_COUNTERS_NEST = 1,
+	OPAL_IMC_COUNTERS_CORE = 2,
+};
+
+/* PCI p2p descriptor */
+#define OPAL_PCI_P2P_ENABLE		0x1
+#define OPAL_PCI_P2P_LOAD		0x2
+#define OPAL_PCI_P2P_STORE		0x4
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
new file mode 100644
index 0000000..ff38664
--- /dev/null
+++ b/arch/powerpc/include/asm/opal.h
@@ -0,0 +1,389 @@
+/*
+ * PowerNV OPAL definitions.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_OPAL_H
+#define _ASM_POWERPC_OPAL_H
+
+#include <asm/opal-api.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+
+/* We calculate number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS	10
+
+/* /sys/firmware/opal */
+extern struct kobject *opal_kobj;
+
+/* /ibm,opal */
+extern struct device_node *opal_node;
+
+/* API functions */
+int64_t opal_invalid_call(void);
+int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf);
+int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr,
+			uint64_t bdf);
+int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
+			uint64_t lpcr);
+int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn,
+			uint64_t addr, uint64_t PE_mask);
+int64_t opal_npu_spa_clear_cache(uint64_t phb_id, uint32_t bdfn,
+				uint64_t PE_handle);
+int64_t opal_npu_tl_set(uint64_t phb_id, uint32_t bdfn, long cap,
+			uint64_t rate_phys, uint32_t size);
+int64_t opal_console_write(int64_t term_number, __be64 *length,
+			   const uint8_t *buffer);
+int64_t opal_console_read(int64_t term_number, __be64 *length,
+			  uint8_t *buffer);
+int64_t opal_console_write_buffer_space(int64_t term_number,
+					__be64 *length);
+int64_t opal_console_flush(int64_t term_number);
+int64_t opal_rtc_read(__be32 *year_month_day,
+		      __be64 *hour_minute_second_millisecond);
+int64_t opal_rtc_write(uint32_t year_month_day,
+		       uint64_t hour_minute_second_millisecond);
+int64_t opal_tpo_read(uint64_t token, __be32 *year_mon_day, __be32 *hour_min);
+int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
+		       uint32_t hour_min);
+int64_t opal_cec_power_down(uint64_t request);
+int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, const char *diag);
+int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
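+
+/*
+ * Illustrative sketch, not part of the original header: the retry
+ * pattern callers typically build around OPAL_BUSY/OPAL_BUSY_EVENT
+ * using OPAL_BUSY_DELAY_MS above.  Assumes mdelay() from
+ * <linux/delay.h>; the wrapper name is hypothetical.
+ */
+static inline int64_t opal_rtc_write_retry(uint32_t ymd, uint64_t hmsm)
+{
+	int64_t rc = OPAL_BUSY;
+
+	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		rc = opal_rtc_write(ymd, hmsm);
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);	/* let OPAL make progress */
+		else if (rc == OPAL_BUSY)
+			mdelay(OPAL_BUSY_DELAY_MS);
+	}
+	return rc;
+}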
+int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
+				    uint64_t tce_mem_size);
+int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
+				    uint64_t tce_mem_size);
+int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
+				  uint64_t offset, uint8_t *data);
+int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+				       uint64_t offset, __be16 *data);
+int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
+				  uint64_t offset, __be32 *data);
+int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
+				   uint64_t offset, uint8_t data);
+int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+					uint64_t offset, uint16_t data);
+int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
+				   uint64_t offset, uint32_t data);
+int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
+int64_t opal_register_exception_handler(uint64_t opal_exception,
+					uint64_t handler_address,
+					uint64_t glue_cache_line);
+int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
+				   uint8_t *freeze_state,
+				   __be16 *pci_error_type,
+				   __be64 *phb_status);
+int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
+				  uint64_t eeh_action_token);
+int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number,
+				uint64_t eeh_action_token);
+int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no, uint32_t type,
+			    uint32_t func, uint64_t addr, uint64_t mask);
+int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
+
+int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type,
+				 uint16_t window_num, uint16_t enable);
+int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type,
+				    uint16_t window_num,
+				    uint64_t starting_real_address,
+				    uint64_t starting_pci_address,
+				    uint64_t size);
+int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
+				    uint16_t window_type, uint16_t window_num,
+				    uint16_t segment_num);
+int64_t opal_pci_set_phb_table_memory(uint64_t phb_id, uint64_t rtt_addr,
+				      uint64_t ivt_addr, uint64_t ivt_len,
+				      uint64_t reject_array_addr,
+				      uint64_t peltv_addr);
+int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func,
+			uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare,
+			uint8_t pe_action);
+int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe,
+			   uint8_t state);
+int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint32_t pe_number);
+int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number,
+				uint32_t state);
+int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+				  uint8_t *p_bit, uint8_t *q_bit);
+int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+				  uint8_t p_bit, uint8_t q_bit);
+int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
+int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+			     uint32_t xive_num);
+int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
+			     __be32 *interrupt_source_number);
+int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
+			uint8_t msi_range, __be32 *msi_address,
+			__be32 *message_data);
+int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
+			uint32_t xive_num, uint8_t msi_range,
+			__be64 *msi_address, __be32 *message_data);
+int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
+int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
+int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
+int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t window_id,
+				   uint16_t tce_levels, uint64_t tce_table_addr,
+				   uint64_t tce_table_size, uint64_t tce_page_size);
+int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
+					uint16_t dma_window_number, uint64_t pci_start_addr,
+					uint64_t pci_mem_size);
+int64_t opal_pci_reset(uint64_t id, uint8_t reset_scope, uint8_t assert_state);
+
+int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
+				   uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
+				   uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
+				    uint64_t diag_buffer_len);
+int64_t opal_pci_fence_phb(uint64_t phb_id);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
+int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
+int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
+int64_t opal_set_system_attention_led(uint8_t led_action);
+int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
+			    __be16 *pci_error_type, __be16 *severity);
+int64_t opal_pci_poll(uint64_t id);
+int64_t opal_return_cpu(void);
+int64_t opal_check_token(uint64_t token);
+int64_t opal_reinit_cpus(uint64_t flags);
+
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+		       uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+		      uint32_t addr, __be32 *data, uint32_t sz);
+
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
+int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_send_ack_elog(uint64_t log_id);
+void opal_resend_pending_logs(void);
+
+int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
+int64_t opal_manage_flash(uint8_t op);
+int64_t opal_update_flash(uint64_t blk_list);
+int64_t opal_dump_init(uint8_t dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
+int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
+int64_t opal_dump_ack(uint32_t dump_id);
+int64_t opal_dump_resend_notification(void);
+
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_write_oppanel_async(uint64_t token, oppanel_line_t *lines,
+					uint64_t num_lines);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+		uint64_t length);
+int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+		uint64_t length);
+int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
+int64_t opal_sensor_read_u64(u32 sensor_hndl, int token, __be64 *sensor_data);
+int64_t opal_handle_hmi(void);
+int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
+int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
+int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
+int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
+int64_t opal_pci_get_pbcq_tunnel_bar(uint64_t phb_id, uint64_t *addr);
+int64_t opal_pci_set_pbcq_tunnel_bar(uint64_t phb_id, uint64_t addr);
+int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
+		uint64_t msg_len);
+int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
+		uint64_t *msg_len);
+int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
+			 struct opal_i2c_request *oreq);
+int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+			  __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+			  const u64 led_value, __be64 *max_led_type);
+
+int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
+		uint64_t size, uint64_t token);
+int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
+		uint64_t size, uint64_t token);
+int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
+		uint64_t token);
+int64_t opal_get_device_tree(uint32_t phandle, uint64_t buf, uint64_t len);
+int64_t opal_pci_get_presence_state(uint64_t id, uint64_t data);
+int64_t opal_pci_get_power_state(uint64_t id, uint64_t data);
+int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
+				 uint64_t data);
+int64_t opal_pci_poll2(uint64_t id, uint64_t data);
+
+int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_set_cppr(uint8_t cppr);
+int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+			  uint32_t pe_num, uint32_t tce_size,
+			  uint64_t dma_addr, uint32_t npages);
+int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr);
+int64_t opal_xive_reset(uint64_t version);
+int64_t opal_xive_get_irq_info(uint32_t girq,
+			       __be64 *out_flags,
+			       __be64 *out_eoi_page,
+			       __be64 *out_trig_page,
+			       __be32 *out_esb_shift,
+			       __be32 *out_src_chip);
+int64_t opal_xive_get_irq_config(uint32_t girq, __be64 *out_vp,
+				 uint8_t *out_prio, __be32 *out_lirq);
+int64_t opal_xive_set_irq_config(uint32_t girq, uint64_t vp, uint8_t prio,
+				 uint32_t lirq);
+int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
+				 __be64 *out_qpage,
+				 __be64 *out_qsize,
+				 __be64 *out_qeoi_page,
+				 __be32 *out_escalate_irq,
+				 __be64 *out_qflags);
+int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
+				 uint64_t qpage,
+				 uint64_t qsize,
+				 uint64_t qflags);
+int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr);
+int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
+int64_t opal_xive_free_vp_block(uint64_t vp);
+int64_t opal_xive_get_vp_info(uint64_t vp,
+			      __be64 *out_flags,
+			      __be64 *out_cam_value,
+			      __be64 *out_report_cl_pair,
+			      __be32 *out_chip_id);
+int64_t opal_xive_set_vp_info(uint64_t vp,
+			      uint64_t flags,
+			      uint64_t report_cl_pair);
+int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_free_irq(uint32_t girq);
+int64_t opal_xive_sync(uint32_t type, uint32_t id);
+int64_t opal_xive_dump(uint32_t type, uint32_t id);
+int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
+			uint64_t desc, uint16_t pe_number);
+
+int64_t opal_imc_counters_init(uint32_t type, uint64_t address,
+							uint64_t cpu_pir);
+int64_t opal_imc_counters_start(uint32_t type, uint64_t cpu_pir);
+int64_t opal_imc_counters_stop(uint32_t type, uint64_t cpu_pir);
+
+int opal_get_powercap(u32 handle, int token, u32 *pcap);
+int opal_set_powercap(u32 handle, int token, u32 pcap);
+int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
+int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
+int opal_sensor_group_clear(u32 group_hndl, int token);
+int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
+int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
+
+s64 opal_signal_system_reset(s32 cpu);
+s64 opal_quiesce(u64 shutdown_type, s32 cpu);
+
+/* Internal functions */
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+				   int depth, void *data);
+extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
+				 const char *uname, int depth, void *data);
+extern void opal_configure_cores(void);
+
+extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
+extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_flush_chars(uint32_t vtermno, bool wait);
+extern int opal_flush_console(uint32_t vtermno);
+
+extern void hvc_opal_init_early(void);
+
+extern int opal_notifier_register(struct notifier_block *nb);
+extern int opal_notifier_unregister(struct notifier_block *nb);
+
+extern int opal_message_notifier_register(enum opal_msg_type msg_type,
+						struct notifier_block *nb);
+extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+					    struct notifier_block *nb);
+extern void opal_notifier_enable(void);
+extern void opal_notifier_disable(void);
+extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
+
+extern int opal_async_get_token_interruptible(void);
+extern int opal_async_release_token(int token);
+extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
+extern int opal_async_wait_response_interruptible(uint64_t token,
+		struct opal_msg *msg);
+extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
+extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
+extern int sensor_group_enable(u32 grp_hndl, bool enable);
+
+struct rtc_time;
+extern time64_t opal_get_boot_time(void);
+extern void opal_nvram_init(void);
+extern void opal_flash_update_init(void);
+extern void opal_flash_update_print_message(void);
+extern int opal_elog_init(void);
+extern void opal_platform_dump_init(void);
+extern void opal_sys_param_init(void);
+extern void opal_msglog_init(void);
+extern void opal_msglog_sysfs_init(void);
+extern int opal_async_comp_init(void);
+extern int opal_sensor_init(void);
+extern int opal_hmi_handler_init(void);
+extern int opal_event_init(void);
+
+extern int opal_machine_check(struct pt_regs *regs);
+extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
+extern int opal_hmi_exception_early(struct pt_regs *regs);
+extern int opal_handle_hmi_exception(struct pt_regs *regs);
+
+extern void opal_shutdown(void);
+extern int opal_resync_timebase(void);
+
+extern void opal_lpc_init(void);
+
+extern void opal_kmsg_init(void);
+
+extern int opal_event_request(unsigned int opal_event_nr);
+
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+					     unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
+extern int opal_error_code(int rc);
+
+ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
+
+static inline int opal_get_async_rc(struct opal_msg msg)
+{
+	if (msg.msg_type != OPAL_MSG_ASYNC_COMP)
+		return OPAL_PARAMETER;
+	else
+		return be64_to_cpu(msg.params[1]);
+}
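+
+/*
+ * Illustrative sketch, not part of the original header: the usual
+ * async-token flow built from the helpers above, shown for a sensor
+ * read.  OPAL_ASYNC_COMPLETION comes from opal-api.h; the wrapper
+ * name is hypothetical.
+ */
+static inline int opal_sensor_read_sync(u32 sensor_hndl, __be32 *data)
+{
+	struct opal_msg msg;
+	int token, rc;
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0)
+		return token;
+
+	rc = opal_sensor_read(sensor_hndl, token, data);
+	if (rc == OPAL_ASYNC_COMPLETION) {
+		rc = opal_async_wait_response(token, &msg);
+		if (!rc)
+			rc = opal_get_async_rc(msg);
+	}
+
+	opal_async_release_token(token);
+	return opal_error_code(rc);
+}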
+
+void opal_wake_poller(void);
+
+void opal_powercap_init(void);
+void opal_psr_init(void);
+void opal_sensor_groups_init(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
new file mode 100644
index 0000000..61fe5d6
--- /dev/null
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * Based on alpha version.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
+#define _ASM_POWERPC_OPROFILE_IMPL_H
+#ifdef __KERNEL__
+
+#define OP_MAX_COUNTER 8
+
+/* Per-counter configuration as set via oprofilefs.  */
+struct op_counter_config {
+	unsigned long enabled;
+	unsigned long event;
+	unsigned long count;
+	/* Classic doesn't support per-counter user/kernel selection */
+	unsigned long kernel;
+	unsigned long user;
+	unsigned long unit_mask;
+};
+
+/* System-wide configuration as set via oprofilefs.  */
+struct op_system_config {
+#ifdef CONFIG_PPC64
+	unsigned long mmcr0;
+	unsigned long mmcr1;
+	unsigned long mmcra;
+#ifdef CONFIG_OPROFILE_CELL
+	/* Register for oprofile user tool to check cell kernel profiling
+	 * support.
+	 */
+	unsigned long cell_support;
+#endif
+#endif
+	unsigned long enable_kernel;
+	unsigned long enable_user;
+};
+
+/* Per-arch configuration */
+struct op_powerpc_model {
+	int (*reg_setup) (struct op_counter_config *,
+			   struct op_system_config *,
+			   int num_counters);
+	int  (*cpu_setup) (struct op_counter_config *);
+	int  (*start) (struct op_counter_config *);
+	int  (*global_start) (struct op_counter_config *);
+	void (*stop) (void);
+	void (*global_stop) (void);
+	int (*sync_start)(void);
+	int (*sync_stop)(void);
+	void (*handle_interrupt) (struct pt_regs *,
+				  struct op_counter_config *);
+	int num_counters;
+};
+
+extern struct op_powerpc_model op_model_fsl_emb;
+extern struct op_powerpc_model op_model_power4;
+extern struct op_powerpc_model op_model_7450;
+extern struct op_powerpc_model op_model_cell;
+extern struct op_powerpc_model op_model_pa6t;
+
+/* All the classic PPC parts use these */
+static inline unsigned int classic_ctr_read(unsigned int i)
+{
+	switch(i) {
+	case 0:
+		return mfspr(SPRN_PMC1);
+	case 1:
+		return mfspr(SPRN_PMC2);
+	case 2:
+		return mfspr(SPRN_PMC3);
+	case 3:
+		return mfspr(SPRN_PMC4);
+	case 4:
+		return mfspr(SPRN_PMC5);
+	case 5:
+		return mfspr(SPRN_PMC6);
+
+/* No PPC32 chip has more than 6 so far */
+#ifdef CONFIG_PPC64
+	case 6:
+		return mfspr(SPRN_PMC7);
+	case 7:
+		return mfspr(SPRN_PMC8);
+#endif
+	default:
+		return 0;
+	}
+}
+
+static inline void classic_ctr_write(unsigned int i, unsigned int val)
+{
+	switch(i) {
+	case 0:
+		mtspr(SPRN_PMC1, val);
+		break;
+	case 1:
+		mtspr(SPRN_PMC2, val);
+		break;
+	case 2:
+		mtspr(SPRN_PMC3, val);
+		break;
+	case 3:
+		mtspr(SPRN_PMC4, val);
+		break;
+	case 4:
+		mtspr(SPRN_PMC5, val);
+		break;
+	case 5:
+		mtspr(SPRN_PMC6, val);
+		break;
+
+/* No PPC32 chip has more than 6, yet */
+#ifdef CONFIG_PPC64
+	case 6:
+		mtspr(SPRN_PMC7, val);
+		break;
+	case 7:
+		mtspr(SPRN_PMC8, val);
+		break;
+#endif
+	default:
+		break;
+	}
+}
+
+extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
new file mode 100644
index 0000000..ad4f161
--- /dev/null
+++ b/arch/powerpc/include/asm/paca.h
@@ -0,0 +1,272 @@
+/*
+ * This control block defines the PACA, which holds the processor
+ * specific data for each logical processor on the system.
+ * There are some pointers defined that are utilized by PLIC.
+ *
+ * C 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PACA_H
+#define _ASM_POWERPC_PACA_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC64
+
+#include <linux/string.h>
+#include <asm/types.h>
+#include <asm/lppaca.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+#ifdef CONFIG_PPC_BOOK3E
+#include <asm/exception-64e.h>
+#else
+#include <asm/exception-64s.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_asm.h>
+#endif
+#include <asm/accounting.h>
+#include <asm/hmi.h>
+#include <asm/cpuidle.h>
+#include <asm/atomic.h>
+
+register struct paca_struct *local_paca asm("r13");
+
+#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
+extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+/*
+ * Add standard checks that preemption cannot occur when using get_paca():
+ * otherwise the paca_struct it points to may be the wrong one just after.
+ */
+#define get_paca()	((void) debug_smp_processor_id(), local_paca)
+#else
+#define get_paca()	local_paca
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+#define get_lppaca()	(get_paca()->lppaca_ptr)
+#endif
+
+#define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
+
+struct task_struct;
+
+/*
+ * Defines the layout of the paca.
+ *
+ * This structure is not directly accessed by firmware or the service
+ * processor.
+ */
+struct paca_struct {
+#ifdef CONFIG_PPC_PSERIES
+	/*
+	 * Because hw_cpu_id, unlike other paca fields, is accessed
+	 * routinely from other CPUs (from the IRQ code), we stick to
+	 * read-only (after boot) fields in the first cacheline to
+	 * avoid cacheline bouncing.
+	 */
+
+	struct lppaca *lppaca_ptr;	/* Pointer to LpPaca for PLIC */
+#endif /* CONFIG_PPC_PSERIES */
+
+	/*
+	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c 
+	 * load lock_token and paca_index with a single lwz
+	 * instruction.  They must travel together and be properly
+	 * aligned.
+	 */
+#ifdef __BIG_ENDIAN__
+	u16 lock_token;			/* Constant 0x8000, used in locks */
+	u16 paca_index;			/* Logical processor number */
+#else
+	u16 paca_index;			/* Logical processor number */
+	u16 lock_token;			/* Constant 0x8000, used in locks */
+#endif
+
+	u64 kernel_toc;			/* Kernel TOC address */
+	u64 kernelbase;			/* Base address of kernel */
+	u64 kernel_msr;			/* MSR while running in kernel */
+	void *emergency_sp;		/* pointer to emergency stack */
+	u64 data_offset;		/* per cpu data offset */
+	s16 hw_cpu_id;			/* Physical processor number */
+	u8 cpu_start;			/* At startup, processor spins until */
+					/* this becomes non-zero. */
+	u8 kexec_state;		/* set when kexec down has irqs off */
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct slb_shadow *slb_shadow_ptr;
+	struct dtl_entry *dispatch_log;
+	struct dtl_entry *dispatch_log_end;
+#endif
+	u64 dscr_default;		/* per-CPU default DSCR */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * Now, starting in cacheline 2, the exception save areas
+	 */
+	/* used for most interrupts/exceptions */
+	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
+	u64 exslb[EX_SIZE];	/* used for SLB/segment table misses
+ 				 * on the linear mapping */
+	/* SLB related definitions */
+	u16 vmalloc_sllp;
+	u16 slb_cache_ptr;
+	u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3E
+	u64 exgen[8] __aligned(0x40);
+	/* Keep pgd in the same cacheline as the start of extlb */
+	pgd_t *pgd __aligned(0x40); /* Current PGD */
+	pgd_t *kernel_pgd;		/* Kernel PGD */
+
+	/* Shared by all threads of a core -- points to tcd of first thread */
+	struct tlb_core_data *tcd_ptr;
+
+	/*
+	 * We can have up to 3 levels of reentrancy in the TLB miss handler,
+	 * in each of four exception levels (normal, crit, mcheck, debug).
+	 */
+	u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
+	u64 exmc[8];		/* used for machine checks */
+	u64 excrit[8];		/* used for crit interrupts */
+	u64 exdbg[8];		/* used for debug interrupts */
+
+	/* Kernel stack pointers for use by special exceptions */
+	void *mc_kstack;
+	void *crit_kstack;
+	void *dbg_kstack;
+
+	struct tlb_core_data tcd;
+#endif /* CONFIG_PPC_BOOK3E */
+
+#ifdef CONFIG_PPC_BOOK3S
+	mm_context_id_t mm_ctx_id;
+#ifdef CONFIG_PPC_MM_SLICES
+	unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
+	unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
+	unsigned long mm_ctx_slb_addr_limit;
+#else
+	u16 mm_ctx_user_psize;
+	u16 mm_ctx_sllp;
+#endif
+#endif
+
+	/*
+	 * then miscellaneous read-write fields
+	 */
+	struct task_struct *__current;	/* Pointer to current */
+	u64 kstack;			/* Saved Kernel stack addr */
+	u64 stab_rr;			/* stab/slb round-robin counter */
+	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
+	u64 saved_msr;			/* MSR saved here by enter_rtas */
+	u16 trap_save;			/* Used when bad stack is encountered */
+	u8 irq_soft_mask;		/* mask for irq soft masking */
+	u8 irq_happened;		/* irq happened while soft-disabled */
+	u8 io_sync;			/* writel() needs spin_unlock sync */
+	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disabled */
+	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	u8 pmcregs_in_use;		/* pseries puts this in lppaca */
+#endif
+	u64 sprg_vdso;			/* Saved user-visible sprg */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u64 tm_scratch;                 /* TM scratch area for reclaim */
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+	/* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
+	u32 *core_idle_state_ptr;
+	u8 thread_idle_state;		/* PNV_THREAD_RUNNING/NAP/SLEEP	*/
+	/* Mask to indicate thread id in core */
+	u8 thread_mask;
+	/* Mask to denote subcore sibling threads */
+	u8 subcore_sibling_mask;
+	/* Flag to request this thread not to stop */
+	atomic_t dont_stop;
+	/* The PSSCR value that the kernel requested before going to stop */
+	u64 requested_psscr;
+
+	/*
+	 * Save area for additional SPRs that need to be
+	 * saved/restored during cpuidle stop.
+	 */
+	struct stop_sprs stop_sprs;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Non-maskable exceptions that are not performance critical */
+	u64 exnmi[EX_SIZE];	/* used for system reset (nmi) */
+	u64 exmc[EX_SIZE];	/* used for machine checks */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Exclusive stacks for system reset and machine check exception. */
+	void *nmi_emergency_sp;
+	void *mc_emergency_sp;
+
+	u16 in_nmi;			/* In nmi handler */
+
+	/*
+	 * Flag to check whether we are in machine check early handler
+	 * and already using emergency stack.
+	 */
+	u16 in_mce;
+	u8 hmi_event_available;		/* HMI event is available */
+	u8 hmi_p9_special_emu;		/* HMI P9 special emulation */
+#endif
+	u8 ftrace_enabled;		/* Hard disable ftrace */
+
+	/* Stuff for accurate time accounting */
+	struct cpu_accounting_data accounting;
+	u64 dtl_ridx;			/* read index in dispatch log */
+	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	/* We use this to store guest state in */
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+#endif
+	struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/*
+	 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+	 * more details
+	 */
+	struct sibling_subcore_state *sibling_subcore_state;
+#endif
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * rfi fallback flush must be in its own cacheline to prevent
+	 * other paca data leaking into the L1d
+	 */
+	u64 exrfi[EX_SIZE] __aligned(0x80);
+	void *rfi_flush_fallback_area;
+	u64 l1d_flush_size;
+#endif
+#ifdef CONFIG_PPC_PSERIES
+	u8 *mce_data_buf;		/* buffer to hold per cpu rtas errlog */
+#endif /* CONFIG_PPC_PSERIES */
+} ____cacheline_aligned;
+
+extern void copy_mm_to_paca(struct mm_struct *mm);
+extern struct paca_struct **paca_ptrs;
+extern void initialise_paca(struct paca_struct *new_paca, int cpu);
+extern void setup_paca(struct paca_struct *new_paca);
+extern void allocate_paca_ptrs(void);
+extern void allocate_paca(int cpu);
+extern void free_unused_pacas(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void allocate_paca_ptrs(void) { }
+static inline void allocate_paca(int cpu) { }
+static inline void free_unused_pacas(void) { }
+
+#endif /* CONFIG_PPC64 */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PACA_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
new file mode 100644
index 0000000..f6a1265
--- /dev/null
+++ b/arch/powerpc/include/asm/page.h
@@ -0,0 +1,357 @@
+#ifndef _ASM_POWERPC_PAGE_H
+#define _ASM_POWERPC_PAGE_H
+
+/*
+ * Copyright (C) 2001,2005 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/kernel.h>
+#else
+#include <asm/types.h>
+#endif
+#include <asm/asm-const.h>
+
+/*
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
+ * on PPC44x). For PPC64 we support either 4K or 64K software
+ * page size. When using 64K pages however, whether we are really supporting
+ * 64K pages in HW or not is irrelevant to those definitions.
+ */
+#if defined(CONFIG_PPC_256K_PAGES)
+#define PAGE_SHIFT		18
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define PAGE_SHIFT		16
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PAGE_SHIFT		14
+#else
+#define PAGE_SHIFT		12
+#endif
+
+#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+extern bool hugetlb_disabled;
+extern unsigned int HPAGE_SHIFT;
+#else
+#define HPAGE_SHIFT PAGE_SHIFT
+#endif
+#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
+#endif
+
+/*
+ * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
+ * assign PAGE_MASK to a larger type it gets extended the way we want
+ * (i.e. with 1s in the high bits).
+ */
+#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
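+
+/*
+ * Worked example (illustrative): with PAGE_SHIFT == 12,
+ * ~((1 << 12) - 1) is the int 0xfffff000; assigned to an unsigned
+ * long on 64-bit it sign-extends to 0xfffffffffffff000, so
+ * (addr & PAGE_MASK) still clears only the low 12 bits.
+ */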
+
+/*
+ * KERNELBASE is the virtual address of the start of the kernel, it's often
+ * the same as PAGE_OFFSET, but _might not be_.
+ *
+ * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
+ *
+ * PAGE_OFFSET is the virtual address of the start of lowmem.
+ *
+ * PHYSICAL_START is the physical address of the start of the kernel.
+ *
+ * MEMORY_START is the physical address of the start of lowmem.
+ *
+ * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
+ * ppc32 and based on how they are set we determine MEMORY_START.
+ *
+ * For the linear mapping the following equation should be true:
+ * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
+ *
+ * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
+ *
+ * There are two ways to determine a physical address from a virtual one:
+ * va = pa + PAGE_OFFSET - MEMORY_START
+ * va = pa + KERNELBASE - PHYSICAL_START
+ *
+ * If you want to know something's offset from the start of the kernel you
+ * should subtract KERNELBASE.
+ *
+ * If you want to test if something's a kernel address, use is_kernel_addr().
+ */
+
+#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
+#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
+#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
+
+#if defined(CONFIG_NONSTATIC_KERNEL)
+#ifndef __ASSEMBLY__
+
+extern phys_addr_t memstart_addr;
+extern phys_addr_t kernstart_addr;
+
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
+extern long long virt_phys_offset;
+#endif
+
+#endif /* __ASSEMBLY__ */
+#define PHYSICAL_START	kernstart_addr
+
+#else	/* !CONFIG_NONSTATIC_KERNEL */
+#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
+#endif
+
+/* See Description below for VIRT_PHYS_OFFSET */
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+#ifdef CONFIG_RELOCATABLE
+#define VIRT_PHYS_OFFSET virt_phys_offset
+#else
+#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
+#endif
+#endif
+
+#ifdef CONFIG_PPC64
+#define MEMORY_START	0UL
+#elif defined(CONFIG_NONSTATIC_KERNEL)
+#define MEMORY_START	memstart_addr
+#else
+#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
+#ifndef __ASSEMBLY__
+extern unsigned long max_mapnr;
+static inline bool pfn_valid(unsigned long pfn)
+{
+	unsigned long min_pfn = ARCH_PFN_OFFSET;
+
+	return pfn >= min_pfn && pfn < max_mapnr;
+}
+#endif
+#endif
+
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+				pfn_valid(virt_to_pfn(kaddr)))
+#else
+#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
+#endif
+
+/*
+ * On Book-E parts we need __va to parse the device tree and we can't
+ * determine MEMORY_START until then.  However we can determine PHYSICAL_START
+ * from information at hand (program counter, TLB lookup).
+ *
+ * On BookE with RELOCATABLE && PPC32
+ *
+ *   With RELOCATABLE && PPC32,  we support loading the kernel at any physical
+ *   address without any restriction on the page alignment.
+ *
+ *   We find the runtime address of _stext and relocate ourselves based on 
+ *   the following calculation:
+ *
+ *  	  virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
+ *  				MODULO(_stext.run,256M)
+ *   and create the following mapping:
+ *
+ * 	  ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
+ *
+ *   When we process relocations, we cannot depend on the
+ *   existing equation for the __va()/__pa() translations:
+ *
+ * 	   __va(x) = (x)  - PHYSICAL_START + KERNELBASE
+ *
+ *   Where:
+ *   	 PHYSICAL_START = kernstart_addr = Physical address of _stext
+ *  	 KERNELBASE = Compiled virtual address of _stext.
+ *
+ *   This formula holds true iff the kernel load address is TLB page aligned.
+ *
+ *   In our case, we also need to account for the shift in the kernel
+ *   virtual address.
+ *
+ *   E.g.,
+ *
+ *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
+ *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
+ *
+ *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
+ *                 = 0xbc100000, which is wrong.
+ *
+ *   Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
+ *   according to our mapping.
+ *
+ *   Hence we use the following formula to get the translations right:
+ *
+ * 	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
+ *
+ * 	  Where:
+ * 		PHYSICAL_START = dynamic load address (kernstart_addr variable)
+ * 		Effective KERNELBASE = virtual_base
+ * 				     = ALIGN_DOWN(KERNELBASE,256M) +
+ * 						MODULO(PHYSICAL_START,256M)
+ *
+ * 	To make the cost of __va() / __pa() more lightweight, we introduce
+ * 	a new variable virt_phys_offset, which will hold:
+ *
+ * 	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
+ * 			 = ALIGN_DOWN(KERNELBASE,256M) -
+ * 			 	ALIGN_DOWN(PHYSICAL_START,256M)
+ *
+ * 	Hence :
+ *
+ * 	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
+ * 		= x + virt_phys_offset
+ *
+ * 		and
+ * 	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
+ * 		= x - virt_phys_offset
+ *
+ * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
+ * the other definitions for __va & __pa.
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
+#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
+#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
+#endif
+#endif
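+/*
+ * Worked example of the 64-bit masking trick above, assuming the usual
+ * PAGE_OFFSET of 0xc000000000000000: physical addresses fit in the low
+ * 60 bits, so OR-ing PAGE_OFFSET in and AND-ing it back off behave
+ * exactly like adding and subtracting it, without the subtraction that
+ * gcc miscompiles:
+ *
+ *	__va(0x2000)             = 0x2000 | 0xc000000000000000
+ *	                         = 0xc000000000002000
+ *	__pa(0xc000000000002000) = 0xc000000000002000 & 0x0fffffffffffffff
+ *	                         = 0x2000
+ */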
+
+/*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+ * and needs to be executable.  This means the whole heap ends
+ * up being executable.
+ */
+#define VM_DATA_DEFAULT_FLAGS32 \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+				 VM_READ | VM_WRITE | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef __powerpc64__
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr, size)   __ALIGN_KERNEL(addr, size)
+#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
+
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
+#else
+#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S_64
+/*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages.  This works because we know that
+ * the page tables live in kernel space.  If we ever decide to support having
+ * page tables at arbitrary addresses, this breaks and will have to change.
+ */
+#ifdef CONFIG_PPC64
+#define PD_HUGE 0x8000000000000000
+#else
+#define PD_HUGE 0x80000000
+#endif
+
+#else	/* CONFIG_PPC_BOOK3S_64 */
+/*
+ * Book3S 64 stores real addresses in the hugepd entries to
+ * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
+ */
+#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+/*
+ * Some number of bits at the level of the page table that points to
+ * a hugepte are used to encode the size.  This masks those bits.
+ */
+#define HUGEPD_SHIFT_MASK     0x3f
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/pgtable-be-types.h>
+#else
+#include <asm/pgtable-types.h>
+#endif
+
+
+#ifndef CONFIG_HUGETLB_PAGE
+#define is_hugepd(pdep)		(0)
+#define pgd_huge(pgd)		(0)
+#endif /* CONFIG_HUGETLB_PAGE */
+
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+		struct page *p);
+extern int page_is_ram(unsigned long pfn);
+extern int devmem_is_allowed(unsigned long pfn);
+
+#ifdef CONFIG_PPC_SMLPAR
+void arch_free_page(struct page *page, int order);
+#define HAVE_ARCH_FREE_PAGE
+#endif
+
+struct vm_area_struct;
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For Book3S 64 with 4K and 64K Linux page sizes
+ * we want to use pointers, because the page table
+ * actually stores pfns.
+ */
+typedef pte_t *pgtable_t;
+#else
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
+typedef pte_t *pgtable_t;
+#else
+typedef struct page *pgtable_t;
+#endif
+#endif
+
+#include <asm-generic/memory_model.h>
+#endif /* __ASSEMBLY__ */
+#include <asm/slice.h>
+
+#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
new file mode 100644
index 0000000..5c378e9
--- /dev/null
+++ b/arch/powerpc/include/asm/page_32.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PAGE_32_H
+#define _ASM_POWERPC_PAGE_32_H
+
+#include <asm/cache.h>
+
+#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
+#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
+#endif
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#endif
+
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
+#ifdef CONFIG_PPC_256K_PAGES
+#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
+#else
+#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
+#endif
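+/*
+ * E.g. assuming 4-byte PTEs (PTE_T_LOG2 = 2): with 256K pages
+ * (PAGE_SHIFT = 18) this yields PTE_SHIFT = 14, i.e. 2^14 PTEs
+ * filling 64K, a quarter of the page; with 4K pages it yields
+ * PTE_SHIFT = 10, a full page of 2^10 PTEs.
+ */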
+
+#ifndef __ASSEMBLY__
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#else
+typedef unsigned long pte_basic_t;
+#endif
+
+/*
+ * Clear page using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced).  This only works on cacheable memory.
+ */
+static inline void clear_page(void *addr)
+{
+	unsigned int i;
+
+	for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
+		dcbz(addr);
+}
+extern void copy_page(void *to, void *from);
+
+#include <asm-generic/getorder.h>
+
+#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
new file mode 100644
index 0000000..c0ce17e
--- /dev/null
+++ b/arch/powerpc/include/asm/page_64.h
@@ -0,0 +1,113 @@
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-const.h>
+
+/*
+ * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
+ * specific; every notion of a page number shared with the firmware, TCEs,
+ * iommu, etc. still uses a page size of 4K.
+ */
+#define HW_PAGE_SHIFT		12
+#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
+#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))
+
+/*
+ * PAGE_FACTOR is the number of bits of shift between PAGE_SHIFT and
+ * HW_PAGE_SHIFT, i.e. between the Linux page size and hardware 4K pages.
+ */
+#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
+
+/* Segment size; normal 256M segments */
+#define SID_SHIFT		28
+#define SID_MASK		ASM_CONST(0xfffffffff)
+#define ESID_MASK		0xfffffffff0000000UL
+#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
+
+/* 1T segments */
+#define SID_SHIFT_1T		40
+#define SID_MASK_1T		0xffffffUL
+#define ESID_MASK_1T		0xffffff0000000000UL
+#define GET_ESID_1T(x)		(((x) >> SID_SHIFT_1T) & SID_MASK_1T)
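+/*
+ * For example, for an address in the kernel linear mapping (assuming
+ * the usual 0xc000000000000000 base):
+ *
+ *	GET_ESID(0xc000000012345678)    = 0xc00000001
+ *	GET_ESID_1T(0xc000000012345678) = 0xc00000
+ */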
+
+#ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static inline void clear_page(void *addr)
+{
+	unsigned long iterations;
+	unsigned long onex, twox, fourx, eightx;
+
+	iterations = ppc64_caches.l1d.blocks_per_page / 8;
+
+	/*
+	 * Some versions of gcc use multiply instructions to
+	 * calculate the offsets, so let's give it a hand to
+	 * do better.
+	 */
+	onex = ppc64_caches.l1d.block_size;
+	twox = onex << 1;
+	fourx = onex << 2;
+	eightx = onex << 3;
+
+	asm volatile(
+	"mtctr	%1	# clear_page\n\
+	.balign	16\n\
+1:	dcbz	0,%0\n\
+	dcbz	%3,%0\n\
+	dcbz	%4,%0\n\
+	dcbz	%5,%0\n\
+	dcbz	%6,%0\n\
+	dcbz	%7,%0\n\
+	dcbz	%8,%0\n\
+	dcbz	%9,%0\n\
+	add	%0,%0,%10\n\
+	bdnz+	1b"
+	: "=&r" (addr)
+	: "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
+		"b" (twox+onex), "b" (fourx), "b" (fourx+onex),
+		"b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
+	: "ctr", "memory");
+}
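+/*
+ * For example, assuming a 128-byte L1 data cache block and 64K pages,
+ * there are 512 blocks per page, so the loop above runs 64 iterations
+ * of eight dcbz instructions each; with 4K pages it runs only 4.
+ */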
+
+extern void copy_page(void *to, void *from);
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS \
+	(is_32bit_task() ? \
+	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+
+/*
+ * This is the default if a program doesn't have a PT_GNU_STACK
+ * program header entry. The PPC64 ELF ABI has a non-executable
+ * stack by default, so in the absence of a PT_GNU_STACK program
+ * header we turn execute permission off.
+ */
+#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS \
+	(is_32bit_task() ? \
+	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
new file mode 100644
index 0000000..8abfb8f
--- /dev/null
+++ b/arch/powerpc/include/asm/parport.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_POWERPC_PARPORT_H
+#define _ASM_POWERPC_PARPORT_H
+#ifdef __KERNEL__
+
+#include <asm/prom.h>
+
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+	struct device_node *np;
+	const u32 *prop;
+	u32 io1, io2;
+	int propsize;
+	int count = 0;
+	int virq;
+
+	for_each_compatible_node(np, "parallel", "pnpPNP,400") {
+		prop = of_get_property(np, "reg", &propsize);
+		if (!prop || propsize > 6*sizeof(u32))
+			continue;
+		io1 = prop[1]; io2 = prop[2];
+
+		virq = irq_of_parse_and_map(np, 0);
+		if (!virq)
+			continue;
+
+		if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+				!= NULL)
+			count++;
+	}
+	return count;
+}
+
+#endif /* __KERNEL__ */
+#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h
new file mode 100644
index 0000000..eafa5a5
--- /dev/null
+++ b/arch/powerpc/include/asm/pasemi_dma.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2006-2008 PA Semi, Inc
+ *
+ * Hardware register layout and descriptor formats for the on-board
+ * DMA engine on PA Semi PWRficient. Used by ethernet, function and security
+ * drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef ASM_PASEMI_DMA_H
+#define ASM_PASEMI_DMA_H
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+	u64 rx_sta[64];		/* RX channel status */
+	u64 tx_sta[20];		/* TX channel status */
+};
+
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+enum {
+	PAS_DMA_CAP_TXCH  = 0x44,	/* Transmit Channel Info      */
+	PAS_DMA_CAP_RXCH  = 0x48,	/* Receive Channel Info       */
+	PAS_DMA_CAP_IFI	  = 0x4c,	/* Interface Info	      */
+	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
+	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
+	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
+	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
+	PAS_DMA_COM_CFG   = 0x114,	/* Common config reg	      */
+	PAS_DMA_TXF_SFLG0 = 0x140,	/* Set flags                  */
+	PAS_DMA_TXF_SFLG1 = 0x144,	/* Set flags                  */
+	PAS_DMA_TXF_CFLG0 = 0x148,	/* Clear flags                */
+	PAS_DMA_TXF_CFLG1 = 0x14c,	/* Clear flags                */
+};
+
+
+#define PAS_DMA_CAP_TXCH_TCHN_M	0x00ff0000 /* # of TX channels */
+#define PAS_DMA_CAP_TXCH_TCHN_S	16
+
+#define PAS_DMA_CAP_RXCH_RCHN_M	0x00ff0000 /* # of RX channels */
+#define PAS_DMA_CAP_RXCH_RCHN_S	16
+
+#define PAS_DMA_CAP_IFI_IOFF_M	0xff000000 /* Cfg reg for intf pointers */
+#define PAS_DMA_CAP_IFI_IOFF_S	24
+#define PAS_DMA_CAP_IFI_NIN_M	0x00ff0000 /* # of interfaces */
+#define PAS_DMA_CAP_IFI_NIN_S	16
+
+#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
+
+
+/* Per-interface and per-channel registers */
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_MBT	0x00000008
+#define    PAS_DMA_RXINT_RCMDSTA_MDR	0x00000010
+#define    PAS_DMA_RXINT_RCMDSTA_MOO	0x00000020
+#define    PAS_DMA_RXINT_RCMDSTA_MBP	0x00000040
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00002000
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00004000
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00008000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_CFG(i)		(0x204+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_CFG_RBP	0x80000000
+#define    PAS_DMA_RXINT_CFG_ITRR	0x40000000
+#define    PAS_DMA_RXINT_CFG_DHL_M	0x07000000
+#define    PAS_DMA_RXINT_CFG_DHL_S	24
+#define    PAS_DMA_RXINT_CFG_DHL(x)	(((x) << PAS_DMA_RXINT_CFG_DHL_S) & \
+					 PAS_DMA_RXINT_CFG_DHL_M)
+#define    PAS_DMA_RXINT_CFG_ITR	0x00400000
+#define    PAS_DMA_RXINT_CFG_LW		0x00200000
+#define    PAS_DMA_RXINT_CFG_L2		0x00100000
+#define    PAS_DMA_RXINT_CFG_HEN	0x00080000
+#define    PAS_DMA_RXINT_CFG_WIF	0x00000002
+#define    PAS_DMA_RXINT_CFG_WIL	0x00000001
+
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
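+/*
+ * These field macros are meant to be OR-ed together when programming a
+ * register.  A hypothetical caller pointing the buffer ring at 64-bit
+ * physical address "baseaddr", with "nlines" cache lines of ring (both
+ * names made up here), might do:
+ *
+ *	pasemi_write_dma_reg(PAS_DMA_RXINT_BASEL(intf),
+ *			     PAS_DMA_RXINT_BASEL_BRBL(baseaddr));
+ *	pasemi_write_dma_reg(PAS_DMA_RXINT_BASEU(intf),
+ *			     PAS_DMA_RXINT_BASEU_BRBH(baseaddr >> 32) |
+ *			     PAS_DMA_RXINT_BASEU_SIZ(nlines));
+ */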
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_TXCHAN_TCMDSTA_SZ	0x00000800
+#define    PAS_DMA_TXCHAN_TCMDSTA_DB	0x00000400
+#define    PAS_DMA_TXCHAN_TCMDSTA_DE	0x00000200
+#define    PAS_DMA_TXCHAN_TCMDSTA_DA	0x00000100
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TY_COPY	0x00000001	/* Type = copy only */
+#define    PAS_DMA_TXCHAN_CFG_TY_FUNC	0x00000002	/* Type = function */
+#define    PAS_DMA_TXCHAN_CFG_TY_XOR	0x00000003	/* Type = xor only */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_LPDQ	0x00000800
+#define    PAS_DMA_TXCHAN_CFG_LPSQ	0x00000400
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000003c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_TRD	0x00010000	/* translate data */
+#define    PAS_DMA_TXCHAN_CFG_TRR	0x00008000	/* translate rings */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define    PAS_DMA_RXCHAN_CCMDSTA_OD	0x00002000
+#define    PAS_DMA_RXCHAN_CCMDSTA_FD	0x00001000
+#define    PAS_DMA_RXCHAN_CCMDSTA_DT	0x00000800
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_CTR	0x00000400
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_CAUSE_M		0xf000000000000000ull
+#define    PAS_STATUS_TIMER		0x1000000000000000ull
+#define    PAS_STATUS_ERROR		0x2000000000000000ull
+#define    PAS_STATUS_SOFT		0x4000000000000000ull
+#define    PAS_STATUS_INT		0x8000000000000000ull
+
+#define PAS_IOB_COM_PKTHDRCNT		0x120
+#define    PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M	0x0fff0000
+#define    PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S	16
+#define    PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M	0x00000fff
+#define    PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S	0
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	16
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	16
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define	XCT_MACTX_T		0x8000000000000000ull
+#define	XCT_MACTX_ST		0x4000000000000000ull
+#define XCT_MACTX_NORES		0x0000000000000000ull
+#define XCT_MACTX_8BRES		0x1000000000000000ull
+#define XCT_MACTX_24BRES	0x2000000000000000ull
+#define XCT_MACTX_40BRES	0x3000000000000000ull
+#define XCT_MACTX_I		0x0800000000000000ull
+#define XCT_MACTX_O		0x0400000000000000ull
+#define XCT_MACTX_E		0x0200000000000000ull
+#define XCT_MACTX_VLAN_M	0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
+#define XCT_MACTX_CRC_M		0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
+#define XCT_MACTX_SS		0x0010000000000000ull
+#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
+				 XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000ull
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
+				 XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000ull
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
+				 XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
+#define XCT_MACTX_V6		0x0000000000000010ull
+#define XCT_MACTX_C		0x0000000000000004ull
+#define XCT_MACTX_AL2		0x0000000000000002ull
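+/*
+ * These bits are OR-ed together with the encoded fields to form the
+ * first dword of a descriptor.  A hypothetical MAC TX descriptor for a
+ * frame of "len" bytes with CRC insertion and a completion interrupt
+ * might be composed as:
+ *
+ *	u64 mactx = XCT_MACTX_T | XCT_MACTX_I | XCT_MACTX_CRC_INSERT |
+ *		    XCT_MACTX_LLEN(len);
+ */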
+
+/* Receive descriptor fields */
+#define	XCT_MACRX_T		0x8000000000000000ull
+#define	XCT_MACRX_ST		0x4000000000000000ull
+#define XCT_MACRX_RR_M		0x3000000000000000ull
+#define XCT_MACRX_RR_NORES	0x0000000000000000ull
+#define XCT_MACRX_RR_8BRES	0x1000000000000000ull
+#define XCT_MACRX_O		0x0400000000000000ull
+#define XCT_MACRX_E		0x0200000000000000ull
+#define XCT_MACRX_FF		0x0100000000000000ull
+#define XCT_MACRX_PF		0x0080000000000000ull
+#define XCT_MACRX_OB		0x0040000000000000ull
+#define XCT_MACRX_OD		0x0020000000000000ull
+#define XCT_MACRX_FS		0x0010000000000000ull
+#define XCT_MACRX_NB_M		0x000fc00000000000ull
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & \
+				 XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000ull
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & \
+				 XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000ull
+#define XCT_MACRX_LEN_M		0x0000000060000000ull
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000ull
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000ull
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000ull
+#define XCT_MACRX_CAST_M	0x0000000018000000ull
+#define XCT_MACRX_CAST_UNI	0x0000000000000000ull
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000ull
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000ull
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000ull
+#define XCT_MACRX_VLC_M		0x0000000006000000ull
+#define XCT_MACRX_FM		0x0000000001000000ull
+#define XCT_MACRX_HTY_M		0x0000000000c00000ull
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000ull
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000ull
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000ull
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000ull
+#define XCT_MACRX_IPP_M		0x00000000003f0000ull
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffffull
+#define XCT_MACRX_CSUM_S	0
+
+#define XCT_PTR_T		0x8000000000000000ull
+#define XCT_PTR_LEN_M		0x7ffff00000000000ull
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
+				 XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffffull
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
+				 XCT_PTR_ADDR_M)
+
+/* Receive interface 8-byte result fields */
+#define XCT_RXRES_8B_L4O_M	0xff00000000000000ull
+#define XCT_RXRES_8B_L4O_S	56
+#define XCT_RXRES_8B_RULE_M	0x00ffff0000000000ull
+#define XCT_RXRES_8B_RULE_S	40
+#define XCT_RXRES_8B_EVAL_M	0x000000ffff000000ull
+#define XCT_RXRES_8B_EVAL_S	24
+#define XCT_RXRES_8B_HTYPE_M	0x0000000000f00000ull
+#define XCT_RXRES_8B_HASH_M	0x00000000000fffffull
+#define XCT_RXRES_8B_HASH_S	0
+
+/* Receive interface buffer fields */
+#define XCT_RXB_LEN_M		0x0ffff00000000000ull
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_RXB_LEN_S) & \
+				 XCT_RXB_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffffull
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_RXB_ADDR_S) & \
+				 XCT_RXB_ADDR_M)
+
+/* Copy descriptor fields */
+#define XCT_COPY_T		0x8000000000000000ull
+#define XCT_COPY_ST		0x4000000000000000ull
+#define XCT_COPY_RR_M		0x3000000000000000ull
+#define XCT_COPY_RR_NORES	0x0000000000000000ull
+#define XCT_COPY_RR_8BRES	0x1000000000000000ull
+#define XCT_COPY_RR_24BRES	0x2000000000000000ull
+#define XCT_COPY_RR_40BRES	0x3000000000000000ull
+#define XCT_COPY_I		0x0800000000000000ull
+#define XCT_COPY_O		0x0400000000000000ull
+#define XCT_COPY_E		0x0200000000000000ull
+#define XCT_COPY_STY_ZERO	0x01c0000000000000ull
+#define XCT_COPY_DTY_PREF	0x0038000000000000ull
+#define XCT_COPY_LLEN_M		0x0007ffff00000000ull
+#define XCT_COPY_LLEN_S		32
+#define XCT_COPY_LLEN(x)	((((long)(x)) << XCT_COPY_LLEN_S) & \
+				 XCT_COPY_LLEN_M)
+#define XCT_COPY_SE		0x0000000000000001ull
+
+/* Function descriptor fields */
+#define XCT_FUN_T		0x8000000000000000ull
+#define XCT_FUN_ST		0x4000000000000000ull
+#define XCT_FUN_RR_M		0x3000000000000000ull
+#define XCT_FUN_RR_NORES	0x0000000000000000ull
+#define XCT_FUN_RR_8BRES	0x1000000000000000ull
+#define XCT_FUN_RR_24BRES	0x2000000000000000ull
+#define XCT_FUN_RR_40BRES	0x3000000000000000ull
+#define XCT_FUN_I		0x0800000000000000ull
+#define XCT_FUN_O		0x0400000000000000ull
+#define XCT_FUN_E		0x0200000000000000ull
+#define XCT_FUN_FUN_M		0x01c0000000000000ull
+#define XCT_FUN_FUN_S		54
+#define XCT_FUN_FUN(x)		((((long)(x)) << XCT_FUN_FUN_S) & XCT_FUN_FUN_M)
+#define XCT_FUN_CRM_M		0x0038000000000000ull
+#define XCT_FUN_CRM_NOP		0x0000000000000000ull
+#define XCT_FUN_CRM_SIG		0x0008000000000000ull
+#define XCT_FUN_LLEN_M		0x0007ffff00000000ull
+#define XCT_FUN_LLEN_S		32
+#define XCT_FUN_LLEN(x)		((((long)(x)) << XCT_FUN_LLEN_S) & XCT_FUN_LLEN_M)
+#define XCT_FUN_SHL_M		0x00000000f8000000ull
+#define XCT_FUN_SHL_S		27
+#define XCT_FUN_SHL(x)		((((long)(x)) << XCT_FUN_SHL_S) & XCT_FUN_SHL_M)
+#define XCT_FUN_CHL_M		0x0000000007c00000ull
+#define XCT_FUN_HSZ_M		0x00000000003c0000ull
+#define XCT_FUN_ALG_M		0x0000000000038000ull
+#define XCT_FUN_HP		0x0000000000004000ull
+#define XCT_FUN_BCM_M		0x0000000000003800ull
+#define XCT_FUN_BCP_M		0x0000000000000600ull
+#define XCT_FUN_SIG_M		0x00000000000001f0ull
+#define XCT_FUN_SIG_TCP4	0x0000000000000140ull
+#define XCT_FUN_SIG_TCP6	0x0000000000000150ull
+#define XCT_FUN_SIG_UDP4	0x0000000000000160ull
+#define XCT_FUN_SIG_UDP6	0x0000000000000170ull
+#define XCT_FUN_A		0x0000000000000008ull
+#define XCT_FUN_C		0x0000000000000004ull
+#define XCT_FUN_AL2		0x0000000000000002ull
+#define XCT_FUN_SE		0x0000000000000001ull
+
+/* Function descriptor 8-byte result fields */
+#define XCT_FUNRES_8B_CS_M	0x0000ffff00000000ull
+#define XCT_FUNRES_8B_CS_S	32
+#define XCT_FUNRES_8B_CRC_M	0x00000000ffffffffull
+#define XCT_FUNRES_8B_CRC_S	0
+
+/* Control descriptor fields */
+#define CTRL_CMD_T		0x8000000000000000ull
+#define CTRL_CMD_META_EVT	0x2000000000000000ull
+#define CTRL_CMD_O		0x0400000000000000ull
+#define CTRL_CMD_ETYPE_M	0x0038000000000000ull
+#define CTRL_CMD_ETYPE_EXT	0x0000000000000000ull
+#define CTRL_CMD_ETYPE_WSET	0x0020000000000000ull
+#define CTRL_CMD_ETYPE_WCLR	0x0028000000000000ull
+#define CTRL_CMD_ETYPE_SET	0x0030000000000000ull
+#define CTRL_CMD_ETYPE_CLR	0x0038000000000000ull
+#define CTRL_CMD_REG_M		0x000000000000007full
+#define CTRL_CMD_REG_S		0
+#define CTRL_CMD_REG(x)		((((long)(x)) << CTRL_CMD_REG_S) & \
+				 CTRL_CMD_REG_M)
+
+
+
+/* Prototypes for the shared DMA functions in the platform code. */
+
+/* DMA TX channel type. Right now the only limitations used are event
+ * types 0/1, for event-triggered DMA transactions.
+ */
+
+enum pasemi_dmachan_type {
+	RXCHAN = 0,		/* Any RX chan */
+	TXCHAN = 1,		/* Any TX chan */
+	TXCHAN_EVT0 = 0x1001,	/* TX chan in event class 0 (chan 0-9) */
+	TXCHAN_EVT1 = 0x2001,	/* TX chan in event class 1 (chan 10-19) */
+};
+
+struct pasemi_dmachan {
+	int		 chno;		/* Channel number */
+	enum pasemi_dmachan_type chan_type;	/* TX / RX */
+	u64		*status;	/* Ptr to cacheable status */
+	int		 irq;		/* IRQ used by channel */
+	unsigned int	 ring_size;	/* size of allocated ring */
+	dma_addr_t	 ring_dma;	/* DMA address for ring */
+	u64		*ring_virt;	/* Virt address for ring */
+	void		*priv;		/* Ptr to start of client struct */
+};
+
+/* Read/write the different registers in the I/O Bridge, Ethernet
+ * and DMA Controller
+ */
+extern unsigned int pasemi_read_iob_reg(unsigned int reg);
+extern void pasemi_write_iob_reg(unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_mac_reg(int intf, unsigned int reg);
+extern void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_dma_reg(unsigned int reg);
+extern void pasemi_write_dma_reg(unsigned int reg, unsigned int val);
+
+/* Channel management routines */
+
+extern void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
+				   int total_size, int offset);
+extern void pasemi_dma_free_chan(struct pasemi_dmachan *chan);
+
+extern void pasemi_dma_start_chan(const struct pasemi_dmachan *chan,
+				  const u32 cmdsta);
+extern int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan);
+
+/* Common routines to allocate rings and buffers */
+
+extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size);
+extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan);
+
+extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
+				  dma_addr_t *handle);
+extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
+				dma_addr_t *handle);
+
+/* Routines to allocate flags (events) for channel synchronization */
+extern int  pasemi_dma_alloc_flag(void);
+extern void pasemi_dma_free_flag(int flag);
+extern void pasemi_dma_set_flag(int flag);
+extern void pasemi_dma_clear_flag(int flag);
+
+/* Routines to allocate function engines */
+extern int  pasemi_dma_alloc_fun(void);
+extern void pasemi_dma_free_fun(int fun);
+
+/* Initialize the library, must be called before any other functions */
+extern int pasemi_dma_init(void);
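+/*
+ * Rough usage sketch (hypothetical client code; struct my_priv and
+ * RING_SIZE are made-up names, and error handling is omitted): embed a
+ * struct pasemi_dmachan in a private structure, then:
+ *
+ *	struct my_priv {
+ *		struct pasemi_dmachan chan;
+ *		...
+ *	} *priv;
+ *
+ *	pasemi_dma_init();
+ *	priv = pasemi_dma_alloc_chan(TXCHAN, sizeof(*priv),
+ *				     offsetof(struct my_priv, chan));
+ *	pasemi_dma_alloc_ring(&priv->chan, RING_SIZE);
+ *	pasemi_dma_start_chan(&priv->chan, 0);
+ */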
+
+#endif /* ASM_PASEMI_DMA_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
new file mode 100644
index 0000000..94d4490
--- /dev/null
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -0,0 +1,295 @@
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+#ifdef __KERNEL__
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+
+struct device_node;
+
+/*
+ * PCI controller operations
+ */
+struct pci_controller_ops {
+	void		(*dma_dev_setup)(struct pci_dev *pdev);
+	void		(*dma_bus_setup)(struct pci_bus *bus);
+
+	int		(*probe_mode)(struct pci_bus *bus);
+
+	/* Called when pci_enable_device() is called. Returns true to
+	 * allow assignment/enabling of the device. */
+	bool		(*enable_device_hook)(struct pci_dev *pdev);
+
+	void		(*disable_device)(struct pci_dev *pdev);
+
+	void		(*release_device)(struct pci_dev *pdev);
+
+	/* Called during PCI resource reassignment */
+	resource_size_t (*window_alignment)(struct pci_bus *bus,
+					    unsigned long type);
+	void		(*setup_bridge)(struct pci_bus *bus,
+					unsigned long type);
+	void		(*reset_secondary_bus)(struct pci_dev *pdev);
+
+#ifdef CONFIG_PCI_MSI
+	int		(*setup_msi_irqs)(struct pci_dev *pdev,
+					  int nvec, int type);
+	void		(*teardown_msi_irqs)(struct pci_dev *pdev);
+#endif
+
+	int             (*dma_set_mask)(struct pci_dev *pdev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct pci_dev *pdev);
+
+	void		(*shutdown)(struct pci_controller *hose);
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+	struct pci_bus *bus;
+	char is_dynamic;
+#ifdef CONFIG_PPC64
+	int node;
+#endif
+	struct device_node *dn;
+	struct list_head list_node;
+	struct device *parent;
+
+	int first_busno;
+	int last_busno;
+	int self_busno;
+	struct resource busn;
+
+	void __iomem *io_base_virt;
+#ifdef CONFIG_PPC64
+	void *io_base_alloc;
+#endif
+	resource_size_t io_base_phys;
+	resource_size_t pci_io_size;
+
+	/* Some machines have a special region to forward the ISA
+	 * "memory" cycles such as VGA memory regions. Left to 0
+	 * if unsupported
+	 */
+	resource_size_t	isa_mem_phys;
+	resource_size_t	isa_mem_size;
+
+	struct pci_controller_ops controller_ops;
+	struct pci_ops *ops;
+	unsigned int __iomem *cfg_addr;
+	void __iomem *cfg_data;
+
+	/*
+	 * Used for variants of PCI indirect handling and possible quirks:
+	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+	 *  EXT_REG - provides access to PCI-e extended registers
+	 *  SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
+	 *   on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
+	 *   to determine which bus number to match on when generating type0
+	 *   config cycles
+	 *  NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+	 *   hanging if we don't have link and try to do config cycles to
+	 *   anything but the PHB.  Only allow talking to the PHB if this is
+	 *   set.
+	 *  BIG_ENDIAN - cfg_addr is a big endian register
+	 *  BROKEN_MRM - the 440EPx/GRx chips have an erratum that causes hangs on
+	 *   the PLB4.  Effectively disable MRM commands by setting this.
+	 *  FSL_CFG_REG_LINK - Freescale controller version in which the PCIe
+	 *   link status is in a RC PCIe cfg register (vs being a SoC register)
+	 */
+#define PPC_INDIRECT_TYPE_SET_CFG_TYPE		0x00000001
+#define PPC_INDIRECT_TYPE_EXT_REG		0x00000002
+#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	0x00000004
+#define PPC_INDIRECT_TYPE_NO_PCIE_LINK		0x00000008
+#define PPC_INDIRECT_TYPE_BIG_ENDIAN		0x00000010
+#define PPC_INDIRECT_TYPE_BROKEN_MRM		0x00000020
+#define PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK	0x00000040
+	u32 indirect_type;
+	/* Currently, we limit ourselves to 1 IO range and 3 mem
+	 * ranges since the common pci_bus structure can't handle more
+	 */
+	struct resource	io_resource;
+	struct resource mem_resources[3];
+	resource_size_t mem_offset[3];
+	int global_number;		/* PCI domain number */
+
+	resource_size_t dma_window_base_cur;
+	resource_size_t dma_window_size;
+
+#ifdef CONFIG_PPC64
+	unsigned long buid;
+	struct pci_dn *pci_data;
+#endif	/* CONFIG_PPC64 */
+
+	void *private_data;
+};
+
+/* These are used for config access before all the PCI probing
+   has been done. */
+extern int early_read_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 *val);
+extern int early_read_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 *val);
+extern int early_read_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 *val);
+extern int early_write_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 val);
+extern int early_write_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 val);
+extern int early_write_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 val);
+
+extern int early_find_capability(struct pci_controller *hose, int bus,
+				 int dev_fn, int cap);
+
+extern void setup_indirect_pci(struct pci_controller* hose,
+			       resource_size_t cfg_addr,
+			       resource_size_t cfg_data, u32 flags);
+
+extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+				int offset, int len, u32 *val);
+
+extern int __indirect_read_config(struct pci_controller *hose,
+				  unsigned char bus_number, unsigned int devfn,
+				  int offset, int len, u32 *val);
+
+extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
+				 int offset, int len, u32 val);
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
+#ifndef CONFIG_PPC64
+
+extern int pci_device_from_OF_node(struct device_node *node,
+				   u8 *bus, u8 *devfn);
+extern void pci_create_OF_bus_map(void);
+
+#else	/* CONFIG_PPC64 */
+
+/*
+ * PCI stuff, for nodes representing PCI devices, pointed to
+ * by device_node->data.
+ */
+struct iommu_table;
+
+struct pci_dn {
+	int     flags;
+#define PCI_DN_FLAG_IOV_VF	0x01
+
+	int	busno;			/* pci bus number */
+	int	devfn;			/* pci device and function number */
+	int	vendor_id;		/* Vendor ID */
+	int	device_id;		/* Device ID */
+	int	class_code;		/* Device class code */
+
+	struct  pci_dn *parent;
+	struct  pci_controller *phb;	/* for pci devices */
+	struct	iommu_table_group *table_group;	/* for phb's or bridges */
+
+	int	pci_ext_config_space;	/* for pci devices */
+#ifdef CONFIG_EEH
+	struct eeh_dev *edev;		/* eeh device */
+#endif
+#define IODA_INVALID_PE		0xFFFFFFFF
+	unsigned int pe_number;
+#ifdef CONFIG_PCI_IOV
+	int     vf_index;		/* VF index in the PF */
+	u16     vfs_expanded;		/* number of VFs IOV BAR expanded */
+	u16     num_vfs;		/* number of VFs enabled */
+	unsigned int *pe_num_map;	/* PE# for the first VF PE or array */
+	bool    m64_single_mode;	/* Use M64 BAR in Single Mode */
+#define IODA_INVALID_M64        (-1)
+	int     (*m64_map)[PCI_SRIOV_NUM_BARS];	/* Only used on powernv */
+	int     last_allow_rc;			/* Only used on pseries */
+#endif /* CONFIG_PCI_IOV */
+	int	mps;			/* Maximum Payload Size */
+	struct list_head child_list;
+	struct list_head list;
+	struct resource holes[PCI_SRIOV_NUM_BARS];
+};
+
+/* Get the pointer to a device_node's pci_dn */
+#define PCI_DN(dn)	((struct pci_dn *) (dn)->data)
+
+extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+					   int devfn);
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
+extern void remove_dev_pci_data(struct pci_dev *pdev);
+extern struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
+					       struct device_node *dn);
+extern void pci_remove_device_node_info(struct device_node *dn);
+
+static inline int pci_device_from_OF_node(struct device_node *np,
+					  u8 *bus, u8 *devfn)
+{
+	if (!PCI_DN(np))
+		return -ENODEV;
+	*bus = PCI_DN(np)->busno;
+	*devfn = PCI_DN(np)->devfn;
+	return 0;
+}
+
+#if defined(CONFIG_EEH)
+static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
+{
+	return pdn ? pdn->edev : NULL;
+}
+#else
+#define pdn_to_eeh_dev(x)	(NULL)
+#endif
+
+/** Find the bus corresponding to the indicated device node */
+extern struct pci_bus *pci_find_bus_by_node(struct device_node *dn);
+
+/** Remove all of the PCI devices under this bus */
+extern void pci_hp_remove_devices(struct pci_bus *bus);
+
+/** Discover new pci devices under this bus, and add them */
+extern void pci_hp_add_devices(struct pci_bus *bus);
+
+extern int pcibios_unmap_io_space(struct pci_bus *bus);
+extern int pcibios_map_io_space(struct pci_bus *bus);
+
+#ifdef CONFIG_NUMA
+#define PHB_SET_NODE(PHB, NODE)		((PHB)->node = (NODE))
+#else
+#define PHB_SET_NODE(PHB, NODE)		((PHB)->node = -1)
+#endif
+
+#endif	/* CONFIG_PPC64 */
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller *pci_find_hose_for_OF_device(
+			struct device_node* node);
+
+/* Fill up host controller resources from the OF node */
+extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+			struct device_node *dev, int primary);
+
+/* Allocate & free a PCI host bridge structure */
+extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
+
+#ifdef CONFIG_PCI
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
+#else
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+	return 0;
+}
+#endif	/* CONFIG_PCI */
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_PCI_BRIDGE_H */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
new file mode 100644
index 0000000..2af9ded
--- /dev/null
+++ b/arch/powerpc/include/asm/pci.h
@@ -0,0 +1,133 @@
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+
+/* Return values for pci_controller_ops.probe_mode function */
+#define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
+
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		0x10000000
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall.  */
+#define IOBASE_BRIDGE_NUMBER	0
+#define IOBASE_MEMORY		1
+#define IOBASE_IO		2
+#define IOBASE_ISA_IO		3
+#define IOBASE_ISA_MEM		4
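+/*
+ * These values select what the powerpc sys_pciconfig_iobase syscall
+ * returns; e.g. a userspace caller could (hypothetically) fetch the
+ * ISA memory base with:
+ *
+ *	syscall(__NR_pciconfig_iobase, IOBASE_ISA_MEM, bus, devfn);
+ */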
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers (don't do that on ppc64 yet!)
+ */
+#define pcibios_assign_all_busses() \
+	(pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	if (ppc_md.pci_get_legacy_ide_irq)
+		return ppc_md.pci_get_legacy_ide_irq(dev, channel);
+	return channel ? 15 : 14;
+}
+
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+extern const struct dma_map_ops *get_pci_dma_ops(void);
+#else	/* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
+#ifdef CONFIG_PPC64
+
+/*
+ * We want to avoid touching the cacheline size or MWI bit.
+ * pSeries firmware sets the cacheline size (which is not the cpu cacheline
+ * size in all cases) and hardware treats MWI the same as memory write.
+ */
+#define PCI_DISABLE_MWI
+
+#endif /* CONFIG_PPC64 */
+
+extern int pci_domain_nr(struct pci_bus *bus);
+
+/* Decide whether to display the domain number in /proc */
+extern int pci_proc_domain(struct pci_bus *bus);
+
+struct vm_area_struct;
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP			1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
+#define arch_can_pci_mmap_io()		1
+#define arch_can_pci_mmap_wc()		1
+
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+			   size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+			   size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+
+#define HAVE_PCI_LEGACY	1
+
+extern void pcibios_claim_one_bus(struct pci_bus *b);
+
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
+
+extern void pcibios_resource_survey(void);
+
+extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);
+
+extern struct pci_dev *of_create_pci_dev(struct device_node *node,
+					struct pci_bus *bus, int devfn);
+
+extern unsigned int pci_parse_of_flags(u32 addr0, int bridge);
+
+extern void of_scan_pci_bridge(struct pci_dev *dev);
+
+extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
+
+struct file;
+extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
+					 unsigned long pfn,
+					 unsigned long size,
+					 pgprot_t prot);
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+
+extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
+extern void pcibios_setup_bus_devices(struct pci_bus *bus);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);
+extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
+extern void pcibios_scan_phb(struct pci_controller *hose);
+
+#endif	/* __KERNEL__ */
+
+extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
+extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
+
+#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
new file mode 100644
index 0000000..dce863a
--- /dev/null
+++ b/arch/powerpc/include/asm/percpu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PERCPU_H_
+#define _ASM_POWERPC_PERCPU_H_
+#ifdef __powerpc64__
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the paca. Based on the x86-64 implementation.
+ */
+
+#ifdef CONFIG_SMP
+
+#include <asm/paca.h>
+
+#define __my_cpu_offset local_paca->data_offset
+
+#endif /* CONFIG_SMP */
+#endif /* __powerpc64__ */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_POWERPC_PERCPU_H_ */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
new file mode 100644
index 0000000..8bf1b63
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -0,0 +1,40 @@
+/*
+ * Performance event support - hardware-specific disambiguation
+ *
+ * For now this is a compile-time decision, but eventually it should be
+ * runtime.  This would allow multiplatform perf event support for e300 (fsl
+ * embedded perf counters) plus server/classic, and would accommodate
+ * devices other than the core which provide their own performance counters.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef CONFIG_PPC_PERF_CTRS
+#include <asm/perf_event_server.h>
+#endif
+
+#ifdef CONFIG_FSL_EMB_PERF_EVENT
+#include <asm/perf_event_fsl_emb.h>
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+
+/*
+ * Overload regs->result to specify whether we should use the MSR (result
+ * is zero) or the SIAR (result is non-zero).
+ */
+#define perf_arch_fetch_caller_regs(regs, __ip)			\
+	do {							\
+		(regs)->result = 0;				\
+		(regs)->nip = __ip;				\
+		(regs)->gpr[1] = current_stack_pointer();	\
+		asm volatile("mfmsr %0" : "=r" ((regs)->msr));	\
+	} while (0)
+#endif
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
new file mode 100644
index 0000000..a581654
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -0,0 +1,50 @@
+/*
+ * Performance event support - Freescale embedded specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+
+#define MAX_HWEVENTS 6
+
+/* event flags */
+#define FSL_EMB_EVENT_VALID      1
+#define FSL_EMB_EVENT_RESTRICTED 2
+
+/* upper half of event flags is PMLCb */
+#define FSL_EMB_EVENT_THRESHMUL  0x0000070000000000ULL
+#define FSL_EMB_EVENT_THRESH     0x0000003f00000000ULL
+
+struct fsl_emb_pmu {
+	const char	*name;
+	int		n_counter; /* total number of counters */
+
+	/*
+	 * The number of contiguous counters starting at zero that
+	 * can hold restricted events, or zero if there are no
+	 * restricted events.
+	 *
+	 * This isn't a very flexible method of expressing constraints,
+	 * but it's very simple and is adequate for existing chips.
+	 */
+	int		n_restricted;
+
+	/* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
+	u64		(*xlate_event)(u64 event_id);
+
+	int		n_generic;
+	int		*generic_events;
+	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+			       [PERF_COUNT_HW_CACHE_OP_MAX]
+			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+int register_fsl_emb_pmu(struct fsl_emb_pmu *);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
new file mode 100644
index 0000000..67a8a95
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -0,0 +1,165 @@
+/*
+ * Performance event support - PowerPC classic/server specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <asm/hw_irq.h>
+#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
+
+/* Update perf_event_print_debug() if this changes */
+#define MAX_HWEVENTS		8
+#define MAX_EVENT_ALTERNATIVES	8
+#define MAX_LIMITED_HWCOUNTERS	2
+
+struct perf_event;
+
+/*
+ * This struct provides the constants and functions needed to
+ * describe the PMU on a particular POWER-family CPU.
+ */
+struct power_pmu {
+	const char	*name;
+	int		n_counter;
+	int		max_alternatives;
+	unsigned long	add_fields;
+	unsigned long	test_adder;
+	int		(*compute_mmcr)(u64 events[], int n_ev,
+				unsigned int hwc[], unsigned long mmcr[],
+				struct perf_event *pevents[]);
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
+				unsigned long *valp);
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
+				u64 alt[]);
+	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
+				u32 flags, struct pt_regs *regs);
+	void		(*get_mem_weight)(u64 *weight);
+	u64             (*bhrb_filter_map)(u64 branch_sample_type);
+	void            (*config_bhrb)(u64 pmu_bhrb_filter);
+	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+	int		(*limited_pmc_event)(u64 event_id);
+	u32		flags;
+	const struct attribute_group	**attr_groups;
+	int		n_generic;
+	int		*generic_events;
+	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+			       [PERF_COUNT_HW_CACHE_OP_MAX]
+			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+	int		n_blacklist_ev;
+	int 		*blacklist_ev;
+	/* BHRB entries in the PMU */
+	int		bhrb_nr;
+};
+
+/*
+ * Values for power_pmu.flags
+ */
+#define PPMU_LIMITED_PMC5_6	0x00000001 /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR		0x00000002 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR		0x00000004 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING	0x00000008 /* no continuous sampling */
+#define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
+#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
+#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
+#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */
+
+/*
+ * Values for flags to get_alternatives()
+ */
+#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
+#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
+#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
+
+extern int register_power_pmu(struct power_pmu *);
+
+struct pt_regs;
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long int read_bhrb(int n);
+
+/*
+ * Only override the default definitions in include/linux/perf_event.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
+/*
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event_id and
+ * other events.
+ *
+ * The value and mask are divided up into (non-overlapping) bitfields
+ * of three different types:
+ *
+ * Select field: this expresses the constraint that some set of bits
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
+ * select field, the mask contains 1s in every bit of the field, and
+ * the value contains a unique value for each possible setting of the
+ * MMCR* bits.  The constraint checking code will ensure that two events
+ * that set the same field in their masks have the same value in their
+ * value dwords.
+ *
+ * Add field: this expresses the constraint that there can be at most
+ * N events in a particular class.  A field of k bits can be used for
+ * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
+ * set (and the other bits 0), and the value has only the least significant
+ * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
+ * in the struct power_pmu for this processor come into play.  The
+ * add_fields value contains 1 in the LSB of the field, and the
+ * test_adder contains 2^(k-1) - 1 - N in the field.
+ *
+ * NAND field: this expresses the constraint that you may not have events
+ * in all of a set of classes.  (For example, on PPC970, you can't select
+ * events from the FPU, ISU and IDU simultaneously, although any two are
+ * possible.)  For N classes, the field is N+1 bits wide, and each class
+ * is assigned one bit from the least-significant N bits.  The mask has
+ * only the most-significant bit set, and the value has only the bit
+ * for the event_id's class set.  The test_adder has the least significant
+ * bit set in the field.
+ *
+ * If an event_id is not subject to the constraint expressed by a particular
+ * field, then it will have 0 in both the mask and value for that field.
+ */
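+
+/*
+ * Worked example (illustrative sketch, not taken from any particular
+ * PMU): consider an add field of k = 3 bits at bits 0-2 of the
+ * constraint, limiting some class to at most N = 3 events.  Each event
+ * in the class then uses:
+ *
+ *	mask  = 0b100	(MSB of the field)
+ *	value = 0b001	(LSB of the field)
+ *
+ * while the struct power_pmu has 0b001 in add_fields and
+ * 2^(k-1) - 1 - N = 0 in test_adder for that field.  Summing the
+ * values of n such events yields n in the field: n <= 3 leaves the
+ * masked MSB clear, whereas n = 4 gives 0b100 and the constraint
+ * checker rejects the group.
+ */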
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+				struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC' where the latter is the name by which the event is known in
+ * POWER CPU specification.
+ *
+ * Similarly, some hardware and cache events use the same event code, e.g.
+ * on POWER8, both "cache-references" and "L1-dcache-loads" events refer
+ * to the same event, PM_LD_REF_L1.  The suffix allows us to have two
+ * sysfs objects for the same event and thus two entries/aliases in sysfs.
+ */
+#define	EVENT_VAR(_id, _suffix)		event_attr_##_id##_suffix
+#define	EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr
+
+#define	EVENT_ATTR(_name, _id, _suffix)					\
+	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id,		\
+			power_events_sysfs_show)
+
+#define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
+#define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)
+
+#define	CACHE_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _c)
+#define	CACHE_EVENT_PTR(_id)		EVENT_PTR(_id, _c)
+
+#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
+#define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
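+
+/*
+ * Usage sketch (illustrative; assumes PM_CYC is defined elsewhere as the
+ * raw event code):
+ *
+ *	GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+ *	POWER_EVENT_ATTR(PM_CYC, PM_CYC);
+ *
+ * This creates the attributes event_attr_PM_CYC_g and event_attr_PM_CYC_p,
+ * so the one event appears in sysfs both under the generic name
+ * "cpu-cycles" and under its POWER-specific name "PM_CYC".
+ */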
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
new file mode 100644
index 0000000..e11f030
--- /dev/null
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+
+#include <linux/mm.h>
+
+#ifndef MODULE
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	if (unlikely(mm == &init_mm))
+		return gfp;
+	return gfp | __GFP_ACCOUNT;
+}
+#else /* !MODULE */
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	return gfp | __GFP_ACCOUNT;
+}
+#endif /* MODULE */
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
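+
+/*
+ * Usage sketch (illustrative; "cache" stands for whatever kmem_cache the
+ * caller allocates page tables from): allocators combine the two so that
+ * kernel page tables skip GFP accounting while user page tables are
+ * charged to the mm's memory cgroup, e.g.
+ *
+ *	pgd = kmem_cache_alloc(cache, pgtable_gfp_flags(mm, PGALLOC_GFP));
+ */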
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgalloc.h>
+#else
+#include <asm/nohash/pgalloc.h>
+#endif
+
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
new file mode 100644
index 0000000..a89c67b
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_BE_TYPES_H
+#define _ASM_POWERPC_PGTABLE_BE_TYPES_H
+
+#include <asm/cmpxchg.h>
+
+/* PTE level */
+typedef struct { __be64 pte; } pte_t;
+#define __pte(x)	((pte_t) { cpu_to_be64(x) })
+#define __pte_raw(x)	((pte_t) { (x) })
+static inline unsigned long pte_val(pte_t x)
+{
+	return be64_to_cpu(x.pte);
+}
+
+static inline __be64 pte_raw(pte_t x)
+{
+	return x.pte;
+}
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { __be64 pmd; } pmd_t;
+#define __pmd(x)	((pmd_t) { cpu_to_be64(x) })
+#define __pmd_raw(x)	((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+	return be64_to_cpu(x.pmd);
+}
+
+static inline __be64 pmd_raw(pmd_t x)
+{
+	return x.pmd;
+}
+
+/*
+ * 64-bit hash always uses a 4-level table. Everybody else uses 4 levels
+ * only for the 4K page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+typedef struct { __be64 pud; } pud_t;
+#define __pud(x)	((pud_t) { cpu_to_be64(x) })
+#define __pud_raw(x)	((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+	return be64_to_cpu(x.pud);
+}
+
+static inline __be64 pud_raw(pud_t x)
+{
+	return x.pud;
+}
+
+#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { __be64 pgd; } pgd_t;
+#define __pgd(x)	((pgd_t) { cpu_to_be64(x) })
+#define __pgd_raw(x)	((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+	return be64_to_cpu(x.pgd);
+}
+
+static inline __be64 pgd_raw(pgd_t x)
+{
+	return x.pgd;
+}
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x)	((x).pgprot)
+#define __pgprot(x)	((pgprot_t) { (x) })
+
+/*
+ * With the hash 64k-page configuration we additionally define a bigger
+ * "real PTE" type that gathers the "second half" of the PTE for pseudo
+ * 64k pages.
+ */
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+{
+	unsigned long *p = (unsigned long *)ptep;
+	__be64 prev;
+
+	/* See comment in switch_mm_irqs_off() */
+	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+					     (__force unsigned long)pte_raw(new));
+
+	return pte_raw(old) == prev;
+}
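+
+/*
+ * Usage sketch (illustrative): pte_xchg() only succeeds when *ptep still
+ * equals @old, so callers normally retry until their update wins:
+ *
+ *	pte_t old, new;
+ *
+ *	do {
+ *		old = READ_ONCE(*ptep);
+ *		new = pte_mkdirty(old);	  (or any other transformation)
+ *	} while (!pte_xchg(ptep, old, new));
+ */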
+
+static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
+{
+	unsigned long *p = (unsigned long *)pmdp;
+	__be64 prev;
+
+	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pmd_raw(old),
+					     (__force unsigned long)pmd_raw(new));
+
+	return pmd_raw(old) == prev;
+}
+
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+	return be64_to_cpu(x.pdbe);
+}
+
+#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
new file mode 100644
index 0000000..eccb30b
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_TYPES_H
+#define _ASM_POWERPC_PGTABLE_TYPES_H
+
+/* PTE level */
+typedef struct { pte_basic_t pte; } pte_t;
+#define __pte(x)	((pte_t) { (x) })
+static inline pte_basic_t pte_val(pte_t x)
+{
+	return x.pte;
+}
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { unsigned long pmd; } pmd_t;
+#define __pmd(x)	((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+	return x.pmd;
+}
+
+/*
+ * 64-bit hash always uses a 4-level table. Everybody else uses 4 levels
+ * only for the 4K page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+typedef struct { unsigned long pud; } pud_t;
+#define __pud(x)	((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+	return x.pud;
+}
+#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define __pgd(x)	((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+	return x.pgd;
+}
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x)	((x).pgprot)
+#define __pgprot(x)	((pgprot_t) { (x) })
+
+/*
+ * With the hash 64k-page configuration we additionally define a bigger
+ * "real PTE" type that gathers the "second half" of the PTE for pseudo
+ * 64k pages.
+ */
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/cmpxchg.h>
+
+static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+{
+	unsigned long *p = (unsigned long *)ptep;
+
+	/* See comment in switch_mm_irqs_off() */
+	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+}
+#endif
+
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+	return x.pd;
+}
+
+#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
new file mode 100644
index 0000000..14c79a7
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+struct mm_struct;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgtable.h>
+#else
+#include <asm/nohash/pgtable.h>
+#endif /* !CONFIG_PPC_BOOK3S */
+
+#ifndef __ASSEMBLY__
+
+/* Keep these as macros to avoid include dependency mess */
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
+int dma_pfn_limit_to_zone(u64 pfn_limit);
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address.  Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test.  What should we do here?
+ */
+#define kern_addr_valid(addr)	(1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+
+extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+		       unsigned long end, int write,
+		       struct page **pages, int *nr);
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_large(pmd)		0
+#endif
+
+/* Can we use this in KVM? */
+unsigned long vmalloc_to_phys(void *vmalloc_addr);
+
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
+
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
new file mode 100644
index 0000000..20ebf15
--- /dev/null
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PowerPC Memory Protection Keys management
+ *
+ * Copyright 2017, Ram Pai, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_KEYS_H
+#define _ASM_POWERPC_KEYS_H
+
+#include <linux/jump_label.h>
+#include <asm/firmware.h>
+
+DECLARE_STATIC_KEY_TRUE(pkey_disabled);
+extern int pkeys_total; /* total pkeys as per device tree */
+extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
+extern u32 reserved_allocation_mask; /* bits set for reserved keys */
+
+#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
+			    VM_PKEY_BIT3 | VM_PKEY_BIT4)
+
+/* Override any generic PKEY permission defines */
+#define PKEY_DISABLE_EXECUTE   0x4
+#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS | \
+				PKEY_DISABLE_WRITE  | \
+				PKEY_DISABLE_EXECUTE)
+
+static inline u64 pkey_to_vmflag_bits(u16 pkey)
+{
+	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
+}
+
+static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return 0x0UL;
+
+	return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) |
+		((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) |
+		((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
+		((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) |
+		((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL));
+}
+
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return 0;
+	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
+}
+
+#define arch_max_pkey() pkeys_total
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+{
+	return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL));
+}
+
+static inline u16 pte_to_pkey_bits(u64 pteflags)
+{
+	return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) |
+		((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL));
+}
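+
+/*
+ * Worked example (illustrative): the H_PTE_PKEY bits are numbered in the
+ * opposite order to the bits of the pkey value, so pkey 5 (0b00101) maps
+ * via pkey_to_vmflag_bits() to VM_PKEY_BIT0 | VM_PKEY_BIT2, then via
+ * vmflag_to_pte_pkey_bits() to H_PTE_PKEY_BIT4 | H_PTE_PKEY_BIT2, and
+ * pte_to_pkey_bits() recovers 0x5: H_PTE_PKEY_BIT0 corresponds to the
+ * most significant bit of the 5-bit key.
+ */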
+
+#define pkey_alloc_mask(pkey) (0x1 << pkey)
+
+#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)
+
+#define __mm_pkey_allocated(mm, pkey) {	\
+	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
+}
+
+#define __mm_pkey_free(mm, pkey) {	\
+	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey);	\
+}
+
+#define __mm_pkey_is_allocated(mm, pkey)	\
+	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
+
+#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
+				       pkey_alloc_mask(pkey))
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+	if (pkey < 0 || pkey >= arch_max_pkey())
+		return false;
+
+	/* Reserved keys are never allocated. */
+	if (__mm_pkey_is_reserved(pkey))
+		return false;
+
+	return __mm_pkey_is_allocated(mm, pkey);
+}
+
+/*
+ * Returns a positive, 5-bit key on success, or -1 on failure.
+ * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * mm_pkey_free().
+ */
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+	/*
+	 * Note: this is the one and only place we make sure that the pkey is
+	 * valid as far as the hardware is concerned. The rest of the kernel
+	 * trusts that only good, valid pkeys come out of here.
+	 */
+	u32 all_pkeys_mask = (u32)(~(0x0));
+	int ret;
+
+	if (static_branch_likely(&pkey_disabled))
+		return -1;
+
+	/*
+	 * Are we out of pkeys? We must handle this specially because ffz()
+	 * behavior is undefined if there are no zeros.
+	 */
+	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+		return -1;
+
+	ret = ffz((u32)mm_pkey_allocation_map(mm));
+	__mm_pkey_allocated(mm, ret);
+
+	return ret;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return -1;
+
+	if (!mm_pkey_is_allocated(mm, pkey))
+		return -EINVAL;
+
+	__mm_pkey_free(mm, pkey);
+
+	return 0;
+}
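+
+/*
+ * Usage sketch (illustrative): both calls rely on the caller holding
+ * mmap_sem for write, as the pkey syscalls do:
+ *
+ *	down_write(&mm->mmap_sem);
+ *	pkey = mm_pkey_alloc(mm);
+ *	...
+ *	if (pkey >= 0)
+ *		ret = mm_pkey_free(mm, pkey);
+ *	up_write(&mm->mmap_sem);
+ */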
+
+/*
+ * Try to dedicate one of the protection keys to be used as an
+ * execute-only protection key.
+ */
+extern int __execute_only_pkey(struct mm_struct *mm);
+static inline int execute_only_pkey(struct mm_struct *mm)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return -1;
+
+	return __execute_only_pkey(mm);
+}
+
+extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
+					 int prot, int pkey);
+static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
+					      int prot, int pkey)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return 0;
+
+	/*
+	 * Is this an mprotect_pkey() call? If so, never override the value that
+	 * came from the user.
+	 */
+	if (pkey != -1)
+		return pkey;
+
+	return __arch_override_mprotect_pkey(vma, prot, pkey);
+}
+
+extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+				       unsigned long init_val);
+static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+					    unsigned long init_val)
+{
+	if (static_branch_likely(&pkey_disabled))
+		return -EINVAL;
+
+	/*
+	 * userspace should not change pkey-0 permissions.
+	 * pkey-0 is associated with every page in the kernel.
+	 * If userspace denies any permission on pkey-0, the
+	 * kernel cannot operate.
+	 */
+	if (pkey == 0)
+		return init_val ? -EINVAL : 0;
+
+	return __arch_set_user_pkey_access(tsk, pkey, init_val);
+}
+
+static inline bool arch_pkeys_enabled(void)
+{
+	return !static_branch_likely(&pkey_disabled);
+}
+
+extern void pkey_mm_init(struct mm_struct *mm);
+extern bool arch_supports_pkeys(int cap);
+extern unsigned int arch_usable_pkeys(void);
+extern void thread_pkey_regs_save(struct thread_struct *thread);
+extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
+				     struct thread_struct *old_thread);
+extern void thread_pkey_regs_init(struct thread_struct *thread);
+#endif /*_ASM_POWERPC_KEYS_H */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
new file mode 100644
index 0000000..cff5a41
--- /dev/null
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
+#define _ASM_POWERPC_PLPAR_WRAPPERS_H
+
+#ifdef CONFIG_PPC_PSERIES
+
+#include <linux/string.h>
+#include <linux/irqflags.h>
+
+#include <asm/hvcall.h>
+#include <asm/paca.h>
+#include <asm/page.h>
+
+static inline long poll_pending(void)
+{
+	return plpar_hcall_norets(H_POLL_PENDING);
+}
+
+static inline u8 get_cede_latency_hint(void)
+{
+	return get_lppaca()->cede_latency_hint;
+}
+
+static inline void set_cede_latency_hint(u8 latency_hint)
+{
+	get_lppaca()->cede_latency_hint = latency_hint;
+}
+
+static inline long cede_processor(void)
+{
+	return plpar_hcall_norets(H_CEDE);
+}
+
+static inline long extended_cede_processor(unsigned long latency_hint)
+{
+	long rc;
+	u8 old_latency_hint = get_cede_latency_hint();
+
+	set_cede_latency_hint(latency_hint);
+
+	rc = cede_processor();
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+	/* Ensure that H_CEDE returns with IRQs on */
+	if (WARN_ON(!(mfmsr() & MSR_EE)))
+		__hard_irq_enable();
+#endif
+
+	set_cede_latency_hint(old_latency_hint);
+
+	return rc;
+}
+
+static inline long vpa_call(unsigned long flags, unsigned long cpu,
+		unsigned long vpa)
+{
+	flags = flags << H_VPA_FUNC_SHIFT;
+
+	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
+}
+
+static inline long unregister_vpa(unsigned long cpu)
+{
+	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
+}
+
+static inline long register_vpa(unsigned long cpu, unsigned long vpa)
+{
+	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
+}
+
+static inline long unregister_slb_shadow(unsigned long cpu)
+{
+	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
+}
+
+static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
+}
+
+static inline long unregister_dtl(unsigned long cpu)
+{
+	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
+}
+
+static inline long register_dtl(unsigned long cpu, unsigned long vpa)
+{
+	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
+}
+
+extern void vpa_init(int cpu);
+
+static inline long plpar_pte_enter(unsigned long flags,
+		unsigned long hpte_group, unsigned long hpte_v,
+		unsigned long hpte_r, unsigned long *slot)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
+
+	*slot = retbuf[0];
+
+	return rc;
+}
+
+static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
+		unsigned long avpn, unsigned long *old_pteh_ret,
+		unsigned long *old_ptel_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
+
+	*old_pteh_ret = retbuf[0];
+	*old_ptel_ret = retbuf[1];
+
+	return rc;
+}
+
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+		unsigned long avpn, unsigned long *old_pteh_ret,
+		unsigned long *old_ptel_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+	*old_pteh_ret = retbuf[0];
+	*old_ptel_ret = retbuf[1];
+
+	return rc;
+}
+
+static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
+		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall(H_READ, retbuf, flags, ptex);
+
+	*old_pteh_ret = retbuf[0];
+	*old_ptel_ret = retbuf[1];
+
+	return rc;
+}
+
+/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
+		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
+
+	*old_pteh_ret = retbuf[0];
+	*old_ptel_ret = retbuf[1];
+
+	return rc;
+}
+
+/*
+ * The "ptes" buffer must be 8*sizeof(unsigned long) bytes, i.e. hold the
+ * 4 returned PTE pairs.
+ */
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+				    unsigned long *ptes)
+
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
+
+	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+	return rc;
+}
+
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * The "ptes" buffer must be 8*sizeof(unsigned long) bytes, i.e. hold the
+ * 4 returned PTE pairs.
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+					unsigned long *ptes)
+
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+	return rc;
+}
+
+static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
+		unsigned long avpn)
+{
+	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
+}
+
+static inline long plpar_resize_hpt_prepare(unsigned long flags,
+					    unsigned long shift)
+{
+	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
+}
+
+static inline long plpar_resize_hpt_commit(unsigned long flags,
+					   unsigned long shift)
+{
+	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
+}
+
+static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
+		unsigned long *tce_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
+
+	*tce_ret = retbuf[0];
+
+	return rc;
+}
+
+static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
+		unsigned long tceval)
+{
+	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
+}
+
+static inline long plpar_tce_put_indirect(unsigned long liobn,
+		unsigned long ioba, unsigned long page, unsigned long count)
+{
+	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
+}
+
+static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
+		unsigned long tceval, unsigned long count)
+{
+	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
+}
+
+/* Set various resource mode parameters */
+static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
+		unsigned long value1, unsigned long value2)
+{
+	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
+}
+
+/*
+ * Enable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_reloc_on_exceptions(void)
+{
+	/* mflags = 3: Exceptions at 0xC000000000004000 */
+	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
+}
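+
+/*
+ * Retry-loop sketch (illustrative; H_IS_LONG_BUSY() and
+ * get_longbusy_msecs() as provided by hvcall.h):
+ *
+ *	long rc;
+ *
+ *	do {
+ *		rc = enable_reloc_on_exceptions();
+ *		if (H_IS_LONG_BUSY(rc))
+ *			mdelay(get_longbusy_msecs(rc));
+ *	} while (H_IS_LONG_BUSY(rc));
+ */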
+
+/*
+ * Disable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long disable_reloc_on_exceptions(void)
+{
+	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
+}
+
+/*
+ * Take exceptions in big endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_big_endian_exceptions(void)
+{
+	/* mflags = 0: big endian exceptions */
+	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
+}
+
+/*
+ * Take exceptions in little endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_little_endian_exceptions(void)
+{
+	/* mflags = 1: little endian exceptions */
+	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
+}
+
+static inline long plpar_set_ciabr(unsigned long ciabr)
+{
+	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
+}
+
+static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
+{
+	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
+}
+
+static inline long plpar_signal_sys_reset(long cpu)
+{
+	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+}
+
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+	if (rc == H_SUCCESS) {
+		p->character = retbuf[0];
+		p->behaviour = retbuf[1];
+	}
+
+	return rc;
+}
+
+#else /* !CONFIG_PPC_PSERIES */
+
+static inline long plpar_set_ciabr(unsigned long ciabr)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_PSERIES */
+
+#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
new file mode 100644
index 0000000..e08e829
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -0,0 +1,405 @@
+/*
+ * Definition of platform feature hooks for PowerMacs
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Paul Mackerras &
+ *                    Ben. Herrenschmidt.
+ *
+ *
+ * Note: I removed media-bay details from the feature stuff; I believe it's
+ *       not worth it, as the media-bay driver can directly use the mac-io
+ *       ASIC registers.
+ *
+ * Implementation note: Currently, none of these functions will block.
+ * However, they may internally protect themselves with a spinlock
+ * for way too long. Be prepared for at least some of these to block
+ * in the future.
+ *
+ * Unless specifically defined, the result code is assumed to be an
+ * error when negative; 0 is the default success result. Some functions
+ * may return additional positive result values.
+ *
+ * To keep implementation simple, all feature calls are assumed to have
+ * the prototype parameters (struct device_node* node, int value).
+ * When either is not used, pass 0.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_PMAC_FEATURE_H
+#define __ASM_POWERPC_PMAC_FEATURE_H
+
+#include <asm/macio.h>
+#include <asm/machdep.h>
+
+/*
+ * Known Mac motherboard models
+ *
+ * Please report any error here to benh@kernel.crashing.org, thanks!
+ *
+ * Note that I don't fully maintain this list for Core99 & MacRISC2
+ * and I'm considering removing all NewWorld entries from it,
+ * relying entirely on the model string instead.
+ */
+
+/* PowerSurge are the first generation of PCI Pmacs. This includes
+ * all of the Grand-Central based machines. We currently don't
+ * differentiate most of them.
+ */
+#define PMAC_TYPE_PSURGE		0x10	/* PowerSurge */
+#define PMAC_TYPE_ANS			0x11	/* Apple Network Server */
+
+/* Here is the infamous series of OHare based machines
+ */
+#define PMAC_TYPE_COMET			0x20	/* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER		0x21	/* Believed to be PowerBook 3400 */
+#define PMAC_TYPE_KANGA			0x22	/* PowerBook 3500 (first G3) */
+#define PMAC_TYPE_ALCHEMY		0x23	/* Alchemy motherboard base */
+#define PMAC_TYPE_GAZELLE		0x24	/* Spartacus, some 5xxx/6xxx */
+#define PMAC_TYPE_UNKNOWN_OHARE		0x2f	/* Unknown, but OHare based */
+
+/* Here are the Heathrow based machines
+ * FIXME: Differentiate wallstreet, mainstreet, wallstreetII
+ */
+#define PMAC_TYPE_GOSSAMER		0x30	/* Gossamer motherboard */
+#define PMAC_TYPE_SILK			0x31	/* Desktop PowerMac G3 */
+#define PMAC_TYPE_WALLSTREET		0x32	/* Wallstreet/Mainstreet PowerBook*/
+#define PMAC_TYPE_UNKNOWN_HEATHROW	0x3f	/* Unknown but heathrow based */
+
+/* Here are newworld machines based on Paddington (heathrow derivative)
+ */
+#define PMAC_TYPE_101_PBOOK		0x40	/* 101 PowerBook (aka Lombard) */
+#define PMAC_TYPE_ORIG_IMAC		0x41	/* First generation iMac */
+#define PMAC_TYPE_YOSEMITE		0x42	/* B&W G3 */
+#define PMAC_TYPE_YIKES			0x43	/* Yikes G4 (PCI graphics) */
+#define PMAC_TYPE_UNKNOWN_PADDINGTON	0x4f	/* Unknown but paddington based */
+
+/* Core99 machines based on UniNorth 1.0 and 1.5
+ *
+ * Note: A single entry here may cover several actual models according
+ * to the device-tree. (Sawtooth covers most tower G4s, FW_IMAC most
+ * FireWire based iMacs, etc...). Those machines are too similar to be
+ * distinguished here; when they need to be differentiated, use the
+ * device-tree "model" or "compatible" property.
+ */
+#define PMAC_TYPE_ORIG_IBOOK		0x40	/* First iBook model (no firewire) */
+#define PMAC_TYPE_SAWTOOTH		0x41	/* Desktop G4s */
+#define PMAC_TYPE_FW_IMAC		0x42	/* FireWire iMacs (except Pangea based) */
+#define PMAC_TYPE_FW_IBOOK		0x43	/* FireWire iBooks (except iBook2) */
+#define PMAC_TYPE_CUBE			0x44	/* Cube PowerMac */
+#define PMAC_TYPE_QUICKSILVER		0x45	/* QuickSilver G4s */
+#define PMAC_TYPE_PISMO			0x46	/* Pismo PowerBook */
+#define PMAC_TYPE_TITANIUM		0x47	/* Titanium PowerBook */
+#define PMAC_TYPE_TITANIUM2		0x48	/* Titanium II PowerBook (no L3, M6) */
+#define PMAC_TYPE_TITANIUM3		0x49	/* Titanium III PowerBook (with L3 & M7) */
+#define PMAC_TYPE_TITANIUM4		0x50	/* Titanium IV PowerBook (with L3 & M9) */
+#define PMAC_TYPE_EMAC			0x50	/* eMac */
+#define PMAC_TYPE_UNKNOWN_CORE99	0x5f
+
+/* MacRisc2 with UniNorth 2.0 */
+#define PMAC_TYPE_RACKMAC		0x80	/* XServe */
+#define PMAC_TYPE_WINDTUNNEL		0x81
+
+/* MacRISC2 machines based on the Pangea chipset
+ */
+#define PMAC_TYPE_PANGEA_IMAC		0x100	/* Flower Power iMac */
+#define PMAC_TYPE_IBOOK2		0x101	/* iBook2 (polycarbonate) */
+#define PMAC_TYPE_FLAT_PANEL_IMAC	0x102	/* Flat panel iMac */
+#define PMAC_TYPE_UNKNOWN_PANGEA	0x10f
+
+/* MacRISC2 machines based on the Intrepid chipset
+ */
+#define PMAC_TYPE_UNKNOWN_INTREPID	0x11f	/* Generic */
+
+/* MacRISC4 / G5 machines. We don't have per-machine selection here anymore,
+ * but rather machine families
+ */
+#define PMAC_TYPE_POWERMAC_G5		0x150	/* U3 & U3H based */
+#define PMAC_TYPE_POWERMAC_G5_U3L	0x151	/* U3L based desktop */
+#define PMAC_TYPE_IMAC_G5		0x152	/* iMac G5 */
+#define PMAC_TYPE_XSERVE_G5		0x153	/* Xserve G5 */
+#define PMAC_TYPE_UNKNOWN_K2		0x19f	/* Any other K2 based */
+#define PMAC_TYPE_UNKNOWN_SHASTA       	0x19e	/* Any other Shasta based */
+
+/*
+ * Motherboard flags
+ */
+
+#define PMAC_MB_CAN_SLEEP		0x00000001
+#define PMAC_MB_HAS_FW_POWER		0x00000002
+#define PMAC_MB_OLD_CORE99		0x00000004
+#define PMAC_MB_MOBILE			0x00000008
+#define PMAC_MB_MAY_SLEEP		0x00000010
+
+/*
+ * Feature calls supported on pmac
+ *
+ */
+
+/*
+ * Use this inline wrapper
+ */
+struct device_node;
+
+static inline long pmac_call_feature(int selector, struct device_node* node,
+					long param, long value)
+{
+	if (!ppc_md.feature_call || !machine_is(powermac))
+		return -ENODEV;
+	return ppc_md.feature_call(selector, node, param, value);
+}
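+
+/*
+ * Example (illustrative): enabling the async SCC channel described by
+ * device node np, using the selectors defined below:
+ *
+ *	rc = pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, PMAC_SCC_ASYNC, 1);
+ */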
+
+/* PMAC_FTR_SCC_ENABLE	(struct device_node* node, int param, int value)
+ * enable/disable an SCC side. Pass the node corresponding to the
+ * channel side as a parameter.
+ * param is the type of port;
+ * if param is ORed with PMAC_SCC_FLAG_XMON, the SCC is locked in the
+ * enabled state for use by xmon.
+ */
+#define PMAC_FTR_SCC_ENABLE		PMAC_FTR_DEF(0)
+	#define PMAC_SCC_ASYNC		0
+	#define PMAC_SCC_IRDA		1
+	#define PMAC_SCC_I2S1		2
+	#define PMAC_SCC_FLAG_XMON	0x00001000
+
+/* PMAC_FTR_MODEM_ENABLE	(struct device_node* node, 0, int value)
+ * enable/disable the internal modem.
+ */
+#define PMAC_FTR_MODEM_ENABLE		PMAC_FTR_DEF(1)
+
+/* PMAC_FTR_SWIM3_ENABLE	(struct device_node* node, 0,int value)
+ * enable/disable the swim3 (floppy) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_SWIM3_ENABLE		PMAC_FTR_DEF(2)
+
+/* PMAC_FTR_MESH_ENABLE		(struct device_node* node, 0, int value)
+ * enable/disable the mesh (scsi) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_MESH_ENABLE		PMAC_FTR_DEF(3)
+
+/* PMAC_FTR_IDE_ENABLE		(struct device_node* node, int busID, int value)
+ * enable/disable an IDE port of a mac-io ASIC
+ * pass the busID parameter
+ */
+#define PMAC_FTR_IDE_ENABLE		PMAC_FTR_DEF(4)
+
+/* PMAC_FTR_IDE_RESET		(struct device_node* node, int busID, int value)
+ * assert(1)/release(0) an IDE reset line (mac-io IDE only)
+ */
+#define PMAC_FTR_IDE_RESET		PMAC_FTR_DEF(5)
+
+/* PMAC_FTR_BMAC_ENABLE		(struct device_node* node, 0, int value)
+ * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
+ * its reset line
+ */
+#define PMAC_FTR_BMAC_ENABLE		PMAC_FTR_DEF(6)
+
+/* PMAC_FTR_GMAC_ENABLE		(struct device_node* node, 0, int value)
+ * enable/disable the gmac (ethernet) cell of an uninorth ASIC. This
+ * controls the cell's clock.
+ */
+#define PMAC_FTR_GMAC_ENABLE		PMAC_FTR_DEF(7)
+
+/* PMAC_FTR_GMAC_PHY_RESET	(struct device_node* node, 0, 0)
+ * Perform a HW reset of the PHY connected to a gmac controller.
+ * Pass the gmac device node, not the PHY node.
+ */
+#define PMAC_FTR_GMAC_PHY_RESET		PMAC_FTR_DEF(8)
+
+/* PMAC_FTR_SOUND_CHIP_ENABLE	(struct device_node* node, 0, int value)
+ * enable/disable the sound chip, whatever it is and provided it can
+ * actually be controlled
+ */
+#define PMAC_FTR_SOUND_CHIP_ENABLE	PMAC_FTR_DEF(9)
+
+/* -- add various tweaks related to sound routing -- */
+
+/* PMAC_FTR_AIRPORT_ENABLE	(struct device_node* node, 0, int value)
+ * enable/disable the airport card
+ */
+#define PMAC_FTR_AIRPORT_ENABLE		PMAC_FTR_DEF(10)
+
+/* PMAC_FTR_RESET_CPU		(NULL, int cpu_nr, 0)
+ * toggle the reset line of a CPU on an uninorth-based SMP machine
+ */
+#define PMAC_FTR_RESET_CPU		PMAC_FTR_DEF(11)
+
+/* PMAC_FTR_USB_ENABLE		(struct device_node* node, 0, int value)
+ * enable/disable a USB cell, along with the power of the USB "pad"
+ * on keylargo based machines
+ */
+#define PMAC_FTR_USB_ENABLE		PMAC_FTR_DEF(12)
+
+/* PMAC_FTR_1394_ENABLE		(struct device_node* node, 0, int value)
+ * enable/disable the firewire cell of an uninorth ASIC.
+ */
+#define PMAC_FTR_1394_ENABLE		PMAC_FTR_DEF(13)
+
+/* PMAC_FTR_1394_CABLE_POWER	(struct device_node* node, 0, int value)
+ * enable/disable the firewire cable power supply of the uninorth
+ * firewire cell
+ */
+#define PMAC_FTR_1394_CABLE_POWER	PMAC_FTR_DEF(14)
+
+/* PMAC_FTR_SLEEP_STATE		(struct device_node* node, 0, int value)
+ * set the sleep state of the motherboard.
+ *
+ * Pass -1 as value to query for sleep capability
+ * Pass 1 to set IOs to sleep
+ * Pass 0 to set IOs to wake
+ */
+#define PMAC_FTR_SLEEP_STATE		PMAC_FTR_DEF(15)
+
+/* PMAC_FTR_GET_MB_INFO		(NULL, selector, 0)
+ *
+ * returns some motherboard info.
+ * selector: 0  - model id
+ *           1  - model flags (capabilities)
+ *           2  - model name (cast to const char *)
+ */
+#define PMAC_FTR_GET_MB_INFO		PMAC_FTR_DEF(16)
+#define   PMAC_MB_INFO_MODEL	0
+#define   PMAC_MB_INFO_FLAGS	1
+#define   PMAC_MB_INFO_NAME	2
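+
+/*
+ * Example (illustrative): the model name selector returns a pointer as a
+ * positive result value, hence the cast:
+ *
+ *	const char *model = (const char *)
+ *		pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+ *				  PMAC_MB_INFO_NAME, 0);
+ */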
+
+/* PMAC_FTR_READ_GPIO		(NULL, int index, 0)
+ *
+ * read a GPIO from a mac-io controller of type KeyLargo or Pangea.
+ * the value returned is a byte (positive), or a negative error code
+ */
+#define PMAC_FTR_READ_GPIO		PMAC_FTR_DEF(17)
+
+/* PMAC_FTR_WRITE_GPIO		(NULL, int index, int value)
+ *
+ * write a GPIO of a mac-io controller of type KeyLargo or Pangea.
+ */
+#define PMAC_FTR_WRITE_GPIO		PMAC_FTR_DEF(18)
+
+/* PMAC_FTR_ENABLE_MPIC
+ *
+ * Enable the MPIC cell
+ */
+#define PMAC_FTR_ENABLE_MPIC		PMAC_FTR_DEF(19)
+
+/* PMAC_FTR_AACK_DELAY_ENABLE	(NULL, int enable, 0)
+ *
+ * Enable/disable the AACK delay on the northbridge for systems using DFS
+ */
+#define PMAC_FTR_AACK_DELAY_ENABLE     	PMAC_FTR_DEF(20)
+
+/* PMAC_FTR_DEVICE_CAN_WAKE
+ *
+ * Used by video drivers to inform system that they can actually perform
+ * wakeup from sleep
+ */
+#define PMAC_FTR_DEVICE_CAN_WAKE	PMAC_FTR_DEF(22)
+
+
+/* Don't use those directly, they are for the sake of pmac_setup.c */
+extern long pmac_do_feature_call(unsigned int selector, ...);
+extern void pmac_feature_init(void);
+
+/* Video suspend tweak */
+extern void pmac_set_early_video_resume(void (*proc)(void *data), void *data);
+extern void pmac_call_early_video_resume(void);
+
+#define PMAC_FTR_DEF(x) ((0x6660000) | (x))
+
+/* The AGP driver registers itself here */
+extern void pmac_register_agp_pm(struct pci_dev *bridge,
+				 int (*suspend)(struct pci_dev *bridge),
+				 int (*resume)(struct pci_dev *bridge));
+
+/* Those are meant to be used by video drivers to deal with AGP
+ * suspend/resume properly
+ */
+extern void pmac_suspend_agp_for_card(struct pci_dev *dev);
+extern void pmac_resume_agp_for_card(struct pci_dev *dev);
+
+/*
+ * The part below is for use by macio_asic.c only, do not rely
+ * on the data structures or constants below in a normal driver
+ *
+ */
+
+#define MAX_MACIO_CHIPS		2
+
+enum {
+	macio_unknown = 0,
+	macio_grand_central,
+	macio_ohare,
+	macio_ohareII,
+	macio_heathrow,
+	macio_gatwick,
+	macio_paddington,
+	macio_keylargo,
+	macio_pangea,
+	macio_intrepid,
+	macio_keylargo2,
+	macio_shasta,
+};
+
+struct macio_chip
+{
+	struct device_node	*of_node;
+	int			type;
+	const char		*name;
+	int			rev;
+	volatile u32		__iomem *base;
+	unsigned long		flags;
+
+	/* For use by macio_asic PCI driver */
+	struct macio_bus	lbus;
+};
+
+extern struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+#define MACIO_FLAG_SCCA_ON	0x00000001
+#define MACIO_FLAG_SCCB_ON	0x00000002
+#define MACIO_FLAG_SCC_LOCKED	0x00000004
+#define MACIO_FLAG_AIRPORT_ON	0x00000010
+#define MACIO_FLAG_FW_SUPPORTED	0x00000020
+
+extern struct macio_chip* macio_find(struct device_node* child, int type);
+
+#define MACIO_FCR32(macio, r)	((macio)->base + ((r) >> 2))
+#define MACIO_FCR8(macio, r)	(((volatile u8 __iomem *)((macio)->base)) + (r))
+
+#define MACIO_IN32(r)		(in_le32(MACIO_FCR32(macio,r)))
+#define MACIO_OUT32(r,v)	(out_le32(MACIO_FCR32(macio,r), (v)))
+#define MACIO_BIS(r,v)		(MACIO_OUT32((r), MACIO_IN32(r) | (v)))
+#define MACIO_BIC(r,v)		(MACIO_OUT32((r), MACIO_IN32(r) & ~(v)))
+#define MACIO_IN8(r)		(in_8(MACIO_FCR8(macio,r)))
+#define MACIO_OUT8(r,v)		(out_8(MACIO_FCR8(macio,r), (v)))
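+
+/*
+ * Note: these accessors expand the identifier "macio" directly, so they
+ * assume a variable of that name is in scope at the call site, e.g.
+ * (register/bit constants as defined in asm/keylargo.h):
+ *
+ *	struct macio_chip *macio = &macio_chips[0];
+ *
+ *	MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_CELL_ENABLE);
+ */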
+
+/*
+ * Those are exported by pmac feature for internal use by arch code
+ * only, like the platform function callbacks; do not use directly in drivers.
+ */
+extern raw_spinlock_t feature_lock;
+extern struct device_node *uninorth_node;
+extern u32 __iomem *uninorth_base;
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r)	(uninorth_base + ((r) >> 2))
+#define UN_IN(r)	(in_be32(UN_REG(r)))
+#define UN_OUT(r,v)	(out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v)	(UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v)	(UN_OUT((r), UN_IN(r) & ~(v)))
+
+/* Uninorth variant:
+ *
+ * 0 = not uninorth
+ * 1 = U1.x or U2.x
+ * 3 = U3
+ * 4 = U4
+ */
+extern int pmac_get_uninorth_variant(void);
+
+#endif /* __ASM_POWERPC_PMAC_FEATURE_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pmac_low_i2c.h b/arch/powerpc/include/asm/pmac_low_i2c.h
new file mode 100644
index 0000000..01d7182
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_low_i2c.h
@@ -0,0 +1,103 @@
+/* 
+ *  include/asm-ppc/pmac_low_i2c.h
+ *
+ *  Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef __PMAC_LOW_I2C_H__
+#define __PMAC_LOW_I2C_H__
+#ifdef __KERNEL__
+
+/* i2c mode (based on the platform functions format) */
+enum {
+	pmac_i2c_mode_dumb	= 1,
+	pmac_i2c_mode_std	= 2,
+	pmac_i2c_mode_stdsub	= 3,
+	pmac_i2c_mode_combined	= 4,
+};
+
+/* RW bit in address */
+enum {
+	pmac_i2c_read		= 0x01,
+	pmac_i2c_write		= 0x00
+};
+
+/* i2c bus type */
+enum {
+	pmac_i2c_bus_keywest	= 0,
+	pmac_i2c_bus_pmu	= 1,
+	pmac_i2c_bus_smu	= 2,
+};
+
+/* i2c bus features */
+enum {
+	/* can_largesub : supports >1 byte subaddresses (SMU only) */
+	pmac_i2c_can_largesub	= 0x00000001u,
+
+	/* multibus : device node holds multiple busses, bus number is
+	 * encoded in bits 0xff00 of "reg" of a given device
+	 */
+	pmac_i2c_multibus	= 0x00000002u,
+};
+
+/* i2c busses in the system */
+struct pmac_i2c_bus;
+struct i2c_adapter;
+
+/* Init, called early during boot */
+extern int pmac_i2c_init(void);
+
+/* Look up an i2c bus for a device-node. The node can be either the bus
+ * node itself or a device below it. In the case of a multibus, the bus
+ * node itself is the controller node; otherwise it is a child of the
+ * controller node.
+ */
+extern struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node);
+
+/* Get the address for an i2c device. This strips the bus number if
+ * necessary. The 7-bit address is returned in the upper bits of the byte
+ * (bit 0 clear) so that the direction bit can be directly ORed in.
+ */
+extern u8 pmac_i2c_get_dev_addr(struct device_node *device);
+
+/* Get info about a bus */
+extern struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus);
+extern struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_type(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus);
+
+/* i2c layer adapter helpers */
+extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus);
+extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter);
+
+/* Match a device or bus with an i2c adapter structure, to be used by drivers
+ * to match device-tree nodes with i2c adapters during adapter discovery
+ * callbacks
+ */
+extern int pmac_i2c_match_adapter(struct device_node *dev,
+				  struct i2c_adapter *adapter);
+
+
+/* (legacy) Locking functions exposed to i2c-keywest */
+extern int pmac_low_i2c_lock(struct device_node *np);
+extern int pmac_low_i2c_unlock(struct device_node *np);
+
+/* Access functions for platform code */
+extern int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled);
+extern void pmac_i2c_close(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode);
+extern int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+			 u32 subaddr, u8 *data,  int len);
+
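+/*
+ * Typical transfer sequence (illustrative sketch; np identifies the i2c
+ * device node):
+ *
+ *	struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
+ *	u8 addrdir = pmac_i2c_get_dev_addr(np) | pmac_i2c_read;
+ *
+ *	pmac_i2c_open(bus, 0);
+ *	pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
+ *	rc = pmac_i2c_xfer(bus, addrdir, 1, subaddr, buf, len);
+ *	pmac_i2c_close(bus);
+ */
+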
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_i2c_suspend(void);
+extern void pmac_pfunc_i2c_resume(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PMAC_LOW_I2C_H__ */
diff --git a/arch/powerpc/include/asm/pmac_pfunc.h b/arch/powerpc/include/asm/pmac_pfunc.h
new file mode 100644
index 0000000..cee4e9f
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_pfunc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PMAC_PFUNC_H__
+#define __PMAC_PFUNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/* Flags in command lists */
+#define PMF_FLAGS_ON_INIT		0x80000000u
+#define PMF_FLAGS_ON_TERM		0x40000000u
+#define PMF_FLAGS_ON_SLEEP		0x20000000u
+#define PMF_FLAGS_ON_WAKE		0x10000000u
+#define PMF_FLAGS_ON_DEMAND		0x08000000u
+#define PMF_FLAGS_INT_GEN		0x04000000u
+#define PMF_FLAGS_HIGH_SPEED		0x02000000u
+#define PMF_FLAGS_LOW_SPEED		0x01000000u
+#define PMF_FLAGS_SIDE_EFFECTS		0x00800000u
+
+/*
+ * Arguments to a platform function call.
+ *
+ * NOTE: By convention, pointer arguments point to an u32
+ */
+struct pmf_args {
+	union {
+		u32 v;
+		u32 *p;
+	} u[4];
+	unsigned int count;
+};
+
+/*
+ * A driver capable of interpreting commands provides a handlers
+ * structure filled with whatever handlers are implemented by this
+ * driver. Unimplemented handlers are left NULL.
+ *
+ * PMF_STD_ARGS are the same arguments that are passed to the parser
+ * and that get passed back to the various handlers.
+ *
+ * Interpreting a given function always starts with a begin() call which
+ * returns instance data to be passed around subsequent calls, and
+ * ends with an end() call. This allows the low level driver to implement
+ * a locking policy or per-function instance data.
+ *
+ * For interrupt capable functions, irq_enable() is called when a client
+ * registers, and irq_disable() is called when the last client unregisters.
+ * Note that irq_enable & irq_disable are called within a semaphore held
+ * by the core, thus you should not try to register yourself to some other
+ * pmf interrupt during those calls.
+ */
+
+#define PMF_STD_ARGS	struct pmf_function *func, void *instdata, \
+		        struct pmf_args *args
+
+struct pmf_function;
+
+struct pmf_handlers {
+	void * (*begin)(struct pmf_function *func, struct pmf_args *args);
+	void (*end)(struct pmf_function *func, void *instdata);
+
+	int (*irq_enable)(struct pmf_function *func);
+	int (*irq_disable)(struct pmf_function *func);
+
+	int (*write_gpio)(PMF_STD_ARGS, u8 value, u8 mask);
+	int (*read_gpio)(PMF_STD_ARGS, u8 mask, int rshift, u8 xor);
+
+	int (*write_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+	int (*read_reg32)(PMF_STD_ARGS, u32 offset);
+	int (*write_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+	int (*read_reg16)(PMF_STD_ARGS, u32 offset);
+	int (*write_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+	int (*read_reg8)(PMF_STD_ARGS, u32 offset);
+
+	int (*delay)(PMF_STD_ARGS, u32 duration);
+
+	int (*wait_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+	int (*wait_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+	int (*wait_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+
+	int (*read_i2c)(PMF_STD_ARGS, u32 len);
+	int (*write_i2c)(PMF_STD_ARGS, u32 len, const u8 *data);
+	int (*rmw_i2c)(PMF_STD_ARGS, u32 masklen, u32 valuelen, u32 totallen,
+		       const u8 *maskdata, const u8 *valuedata);
+
+	int (*read_cfg)(PMF_STD_ARGS, u32 offset, u32 len);
+	int (*write_cfg)(PMF_STD_ARGS, u32 offset, u32 len, const u8 *data);
+	int (*rmw_cfg)(PMF_STD_ARGS, u32 offset, u32 masklen, u32 valuelen,
+		       u32 totallen, const u8 *maskdata, const u8 *valuedata);
+
+	int (*read_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len);
+	int (*write_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len, const u8 *data);
+	int (*set_i2c_mode)(PMF_STD_ARGS, int mode);
+	int (*rmw_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 masklen, u32 valuelen,
+			   u32 totallen, const u8 *maskdata,
+			   const u8 *valuedata);
+
+	int (*read_reg32_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+			       u32 xor);
+	int (*read_reg16_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+			       u32 xor);
+	int (*read_reg8_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+			      u32 xor);
+
+	int (*write_reg32_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+	int (*write_reg16_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+	int (*write_reg8_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+
+	int (*mask_and_compare)(PMF_STD_ARGS, u32 len, const u8 *maskdata,
+				const u8 *valuedata);
+
+	struct module *owner;
+};
+
+
+/*
+ * Drivers that expose platform functions register at init time; this
+ * causes the platform functions for that device node to be parsed in
+ * advance and associated with the device. The data structures are
+ * partially public so a driver can walk the list of platform functions
+ * and eventually inspect the flags
+ */
+struct pmf_device;
+
+struct pmf_function {
+	/* All functions for a given driver are linked */
+	struct list_head	link;
+
+	/* Function node & driver data */
+	struct device_node	*node;
+	void			*driver_data;
+
+	/* For internal use by core */
+	struct pmf_device	*dev;
+
+	/* The name is the "xxx" in "platform-do-xxx", this is how
+	 * platform functions are identified by this code. Some functions
+	 * only operate for a given target, in which case the phandle is
+	 * here (or 0 if the filter doesn't apply)
+	 */
+	const char		*name;
+	u32			phandle;
+
+	/* The flags for that function. You can have several functions
+	 * with the same name and different flags.
+	 */
+	u32			flags;
+
+	/* The actual tokenized function blob */
+	const void		*data;
+	unsigned int		length;
+
+	/* Interrupt clients */
+	struct list_head	irq_clients;
+
+	/* Refcounting */
+	struct kref		ref;
+};
+
+/*
+ * For platform functions that are interrupts, one can register
+ * irq_client structures. You canNOT use the same structure twice
+ * as it contains a link member. Also, the callback is called with
+ * a spinlock held, so you must not call back into any of the pmf_* functions
+ * from within that callback.
+ */
+struct pmf_irq_client {
+	void			(*handler)(void *data);
+	void			*data;
+	struct module		*owner;
+	struct list_head	link;
+	struct pmf_function	*func;
+};
+
+
+/*
+ * Register/Unregister a function-capable driver and its handlers
+ */
+extern int pmf_register_driver(struct device_node *np,
+			      struct pmf_handlers *handlers,
+			      void *driverdata);
+
+extern void pmf_unregister_driver(struct device_node *np);
+
+
+/*
+ * Register/Unregister interrupt clients
+ */
+extern int pmf_register_irq_client(struct device_node *np,
+				   const char *name,
+				   struct pmf_irq_client *client);
+
+extern void pmf_unregister_irq_client(struct pmf_irq_client *client);
+
+/*
+ * Called by the handlers when an irq happens
+ */
+extern void pmf_do_irq(struct pmf_function *func);
+
+
+/*
+ * Low level call to platform functions.
+ *
+ * The phandle can filter on the target object for functions that have
+ * multiple targets; the flags allow you to restrict the call to a given
+ * combination of flags.
+ *
+ * The args array contains as many arguments as the function requires;
+ * this is dependent on the function you are calling, and unfortunately
+ * Apple's mechanism provides no way to encode that, so you have to get it
+ * right at the call site. Some functions require no args, in which case
+ * you can pass NULL.
+ *
+ * You can also pass NULL as the name. This will match any function that has
+ * the appropriate combination of flags & phandle, or you can pass 0 as the
+ * phandle to match any.
+ */
+extern int pmf_do_functions(struct device_node *np, const char *name,
+			    u32 phandle, u32 flags, struct pmf_args *args);
+
+
+
+/*
+ * High level call to a platform function.
+ *
+ * This one looks for the platform-xxx first, so you should call it on the
+ * actual target node if any. It will fall back to platform-do-xxx if it can't
+ * find one. It will also exclusively target functions that have
+ * the "OnDemand" flag.
+ */
+
+extern int pmf_call_function(struct device_node *target, const char *name,
+			     struct pmf_args *args);
+
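+/*
+ * Example (illustrative; the function name and argument are hypothetical):
+ *
+ *	struct pmf_args args = { .count = 1, .u[0].v = 1 };
+ *
+ *	rc = pmf_call_function(np, "cpu-timebase", &args);
+ */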
+
+/*
+ * For low latency interrupt usage, you can lookup for on-demand functions
+ * using the functions below
+ */
+
+extern struct pmf_function *pmf_find_function(struct device_node *target,
+					      const char *name);
+
+extern struct pmf_function *pmf_get_function(struct pmf_function *func);
+extern void pmf_put_function(struct pmf_function *func);
+
+extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args);
+
+int pmac_pfunc_base_install(void);
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_base_suspend(void);
+extern void pmac_pfunc_base_resume(void);
+
+#endif /* __PMAC_PFUNC_H__ */
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
new file mode 100644
index 0000000..7ac3586
--- /dev/null
+++ b/arch/powerpc/include/asm/pmc.h
@@ -0,0 +1,60 @@
+/*
+ * pmc.h
+ * Copyright (C) 2004  David Gibson, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+#ifndef _POWERPC_PMC_H
+#define _POWERPC_PMC_H
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+typedef void (*perf_irq_t)(struct pt_regs *);
+extern perf_irq_t perf_irq;
+
+int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/lppaca.h>
+#include <asm/firmware.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
+#ifdef CONFIG_PPC_PSERIES
+		get_lppaca()->pmcregs_in_use = inuse;
+#endif
+	} else {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+		get_paca()->pmcregs_in_use = inuse;
+#endif
+	}
+#endif
+}
+
+extern void power4_enable_pmcs(void);
+
+#else /* !CONFIG_PPC_BOOK3S_64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMC_H */
diff --git a/arch/powerpc/include/asm/pmi.h b/arch/powerpc/include/asm/pmi.h
new file mode 100644
index 0000000..b4e91fb
--- /dev/null
+++ b/arch/powerpc/include/asm/pmi.h
@@ -0,0 +1,66 @@
+#ifndef _POWERPC_PMI_H
+#define _POWERPC_PMI_H
+
+/*
+ * Definitions for talking with PMI device on PowerPC
+ *
+ * PMI (Platform Management Interrupt) is a way to communicate
+ * with the BMC (Baseboard Management Controller) via interrupts.
+ * Unlike IPMI it is bidirectional and has low latency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __KERNEL__
+
+#define PMI_TYPE_FREQ_CHANGE	0x01
+#define PMI_TYPE_POWER_BUTTON	0x02
+#define PMI_READ_TYPE		0
+#define PMI_READ_DATA0		1
+#define PMI_READ_DATA1		2
+#define PMI_READ_DATA2		3
+#define PMI_WRITE_TYPE		4
+#define PMI_WRITE_DATA0		5
+#define PMI_WRITE_DATA1		6
+#define PMI_WRITE_DATA2		7
+
+#define PMI_ACK			0x80
+
+#define PMI_TIMEOUT		100
+
+typedef struct {
+	u8	type;
+	u8	data0;
+	u8	data1;
+	u8	data2;
+} pmi_message_t;
+
+struct pmi_handler {
+	struct list_head node;
+	u8 type;
+	void (*handle_pmi_message) (pmi_message_t);
+};
+
+int pmi_register_handler(struct pmi_handler *);
+void pmi_unregister_handler(struct pmi_handler *);
+
+int pmi_send_message(pmi_message_t);
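+
+/*
+ * Registration sketch (illustrative only; the handler name and the
+ * meaning of the data bytes are assumptions, not defined here):
+ *
+ *	static void my_handle_pmi(pmi_message_t msg)
+ *	{
+ *		if (msg.type == PMI_TYPE_FREQ_CHANGE) {
+ *			// data0..data2 carry message specific payload
+ *		}
+ *	}
+ *
+ *	static struct pmi_handler my_handler = {
+ *		.type			= PMI_TYPE_FREQ_CHANGE,
+ *		.handle_pmi_message	= my_handle_pmi,
+ *	};
+ *
+ *	pmi_register_handler(&my_handler);
+ */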
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMI_H */
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
new file mode 100644
index 0000000..208b550
--- /dev/null
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2017 IBM Corp.
+#ifndef _ASM_PNV_OCXL_H
+#define _ASM_PNV_OCXL_H
+
+#include <linux/pci.h>
+
+#define PNV_OCXL_TL_MAX_TEMPLATE        63
+#define PNV_OCXL_TL_BITS_PER_RATE       4
+#define PNV_OCXL_TL_RATE_BUF_SIZE       ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8)
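+/* i.e. (63 + 1) templates x 4 bits each / 8 = a 32 byte rate buffer */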
+
+extern int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
+			u16 *supported);
+extern int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count);
+
+extern int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
+			char *rate_buf, int rate_buf_size);
+extern int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
+			uint64_t rate_buf_phys, int rate_buf_size);
+
+extern int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq);
+extern void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
+				void __iomem *tfc, void __iomem *pe_handle);
+extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
+				void __iomem **dar, void __iomem **tfc,
+				void __iomem **pe_handle);
+
+extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask,
+			void **platform_data);
+extern void pnv_ocxl_spa_release(void *platform_data);
+extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle);
+
+extern int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr);
+extern void pnv_ocxl_free_xive_irq(u32 irq);
+
+#endif /* _ASM_PNV_OCXL_H */
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
new file mode 100644
index 0000000..7f627e3
--- /dev/null
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PNV_PCI_H
+#define _ASM_PNV_PCI_H
+
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/irq.h>
+#include <misc/cxl-base.h>
+#include <asm/opal-api.h>
+
+#define PCI_SLOT_ID_PREFIX	(1UL << 63)
+#define PCI_SLOT_ID(phb_id, bdfn)	\
+	(PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
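+
+/*
+ * Worked example (for illustration): with phb_id 1 and bdfn 0x0800
+ * (bus 8, devfn 0), PCI_SLOT_ID(1, 0x0800) evaluates to
+ * 0x8000000008000001ull: the prefix bit, the bdfn in bits 16-31 and
+ * the PHB id in the low bits.
+ */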
+
+extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
+extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
+extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
+				   struct opal_msg *msg);
+extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target,
+			   u64 desc);
+
+extern int pnv_pci_enable_tunnel(struct pci_dev *dev, uint64_t *asnind);
+extern int pnv_pci_disable_tunnel(struct pci_dev *dev);
+extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
+				  int enable);
+extern int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid,
+				      u32 *pid, u32 *tid);
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+			   unsigned int virq);
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
+int pnv_cxl_get_irq_count(struct pci_dev *dev);
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
+int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq);
+bool is_pnv_opal_msi(struct irq_chip *chip);
+
+#ifdef CONFIG_CXL_BASE
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+			       struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+				  struct pci_dev *dev);
+#endif
+
+struct pnv_php_slot {
+	struct hotplug_slot		slot;
+	struct hotplug_slot_info	slot_info;
+	uint64_t			id;
+	char				*name;
+	int				slot_no;
+	unsigned int			flags;
+#define PNV_PHP_FLAG_BROKEN_PDC		0x1
+	struct kref			kref;
+#define PNV_PHP_STATE_INITIALIZED	0
+#define PNV_PHP_STATE_REGISTERED	1
+#define PNV_PHP_STATE_POPULATED		2
+#define PNV_PHP_STATE_OFFLINE		3
+	int				state;
+	int				irq;
+	struct workqueue_struct		*wq;
+	struct device_node		*dn;
+	struct pci_dev			*pdev;
+	struct pci_bus			*bus;
+	bool				power_state_check;
+	void				*fdt;
+	void				*dt;
+	struct of_changeset		ocs;
+	struct pnv_php_slot		*parent;
+	struct list_head		children;
+	struct list_head		link;
+};
+extern struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn);
+extern int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+					uint8_t state);
+
+#endif
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
new file mode 100644
index 0000000..2f3ff7a
--- /dev/null
+++ b/arch/powerpc/include/asm/powernv.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERNV_H
+#define _ASM_POWERNV_H
+
+#ifdef CONFIG_PPC_POWERNV
+#define NPU2_WRITE 1
+extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
+extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+			unsigned long flags,
+			void (*cb)(struct npu_context *, void *),
+			void *priv);
+extern void pnv_npu2_destroy_context(struct npu_context *context,
+				struct pci_dev *gpdev);
+extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
+				unsigned long *flags, unsigned long *status,
+				int count);
+
+void pnv_tm_init(void);
+#else
+static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
+static inline struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+			unsigned long flags,
+			void (*cb)(struct npu_context *, void *),
+			void *priv) { return ERR_PTR(-ENODEV); }
+static inline void pnv_npu2_destroy_context(struct npu_context *context,
+					struct pci_dev *gpdev) { }
+
+static inline int pnv_npu2_handle_fault(struct npu_context *context,
+					uintptr_t *ea, unsigned long *flags,
+					unsigned long *status, int count) {
+	return -ENODEV;
+}
+
+static inline void pnv_tm_init(void) { }
+static inline void pnv_power9_force_smt4(void) { }
+#endif
+
+#endif /* _ASM_POWERNV_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
new file mode 100644
index 0000000..665af14
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -0,0 +1,581 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides masks and opcode images for use by code generation and emulation,
+ * and for instructions that older assemblers might not know about.
+ */
+#ifndef _ASM_POWERPC_PPC_OPCODE_H
+#define _ASM_POWERPC_PPC_OPCODE_H
+
+#include <asm/asm-const.h>
+
+#define	__REG_R0	0
+#define	__REG_R1	1
+#define	__REG_R2	2
+#define	__REG_R3	3
+#define	__REG_R4	4
+#define	__REG_R5	5
+#define	__REG_R6	6
+#define	__REG_R7	7
+#define	__REG_R8	8
+#define	__REG_R9	9
+#define	__REG_R10	10
+#define	__REG_R11	11
+#define	__REG_R12	12
+#define	__REG_R13	13
+#define	__REG_R14	14
+#define	__REG_R15	15
+#define	__REG_R16	16
+#define	__REG_R17	17
+#define	__REG_R18	18
+#define	__REG_R19	19
+#define	__REG_R20	20
+#define	__REG_R21	21
+#define	__REG_R22	22
+#define	__REG_R23	23
+#define	__REG_R24	24
+#define	__REG_R25	25
+#define	__REG_R26	26
+#define	__REG_R27	27
+#define	__REG_R28	28
+#define	__REG_R29	29
+#define	__REG_R30	30
+#define	__REG_R31	31
+
+#define	__REGA0_0	0
+#define	__REGA0_R1	1
+#define	__REGA0_R2	2
+#define	__REGA0_R3	3
+#define	__REGA0_R4	4
+#define	__REGA0_R5	5
+#define	__REGA0_R6	6
+#define	__REGA0_R7	7
+#define	__REGA0_R8	8
+#define	__REGA0_R9	9
+#define	__REGA0_R10	10
+#define	__REGA0_R11	11
+#define	__REGA0_R12	12
+#define	__REGA0_R13	13
+#define	__REGA0_R14	14
+#define	__REGA0_R15	15
+#define	__REGA0_R16	16
+#define	__REGA0_R17	17
+#define	__REGA0_R18	18
+#define	__REGA0_R19	19
+#define	__REGA0_R20	20
+#define	__REGA0_R21	21
+#define	__REGA0_R22	22
+#define	__REGA0_R23	23
+#define	__REGA0_R24	24
+#define	__REGA0_R25	25
+#define	__REGA0_R26	26
+#define	__REGA0_R27	27
+#define	__REGA0_R28	28
+#define	__REGA0_R29	29
+#define	__REGA0_R30	30
+#define	__REGA0_R31	31
+
+/* opcode and xopcode for instructions */
+#define OP_TRAP 3
+#define OP_TRAP_64 2
+
+#define OP_31_XOP_TRAP      4
+#define OP_31_XOP_LDX       21
+#define OP_31_XOP_LWZX      23
+#define OP_31_XOP_LDUX      53
+#define OP_31_XOP_DCBST     54
+#define OP_31_XOP_LWZUX     55
+#define OP_31_XOP_TRAP_64   68
+#define OP_31_XOP_DCBF      86
+#define OP_31_XOP_LBZX      87
+#define OP_31_XOP_STDX      149
+#define OP_31_XOP_STWX      151
+#define OP_31_XOP_STDUX     181
+#define OP_31_XOP_STWUX     183
+#define OP_31_XOP_STBX      215
+#define OP_31_XOP_LBZUX     119
+#define OP_31_XOP_STBUX     247
+#define OP_31_XOP_LHZX      279
+#define OP_31_XOP_LHZUX     311
+#define OP_31_XOP_MSGSNDP   142
+#define OP_31_XOP_MSGCLRP   174
+#define OP_31_XOP_MFSPR     339
+#define OP_31_XOP_LWAX      341
+#define OP_31_XOP_LHAX      343
+#define OP_31_XOP_LWAUX     373
+#define OP_31_XOP_LHAUX     375
+#define OP_31_XOP_STHX      407
+#define OP_31_XOP_STHUX     439
+#define OP_31_XOP_MTSPR     467
+#define OP_31_XOP_DCBI      470
+#define OP_31_XOP_LDBRX     532
+#define OP_31_XOP_LWBRX     534
+#define OP_31_XOP_TLBSYNC   566
+#define OP_31_XOP_STDBRX    660
+#define OP_31_XOP_STWBRX    662
+#define OP_31_XOP_STFSX	    663
+#define OP_31_XOP_STFSUX    695
+#define OP_31_XOP_STFDX     727
+#define OP_31_XOP_STFDUX    759
+#define OP_31_XOP_LHBRX     790
+#define OP_31_XOP_LFIWAX    855
+#define OP_31_XOP_LFIWZX    887
+#define OP_31_XOP_STHBRX    918
+#define OP_31_XOP_STFIWX    983
+
+/* VSX Scalar Load Instructions */
+#define OP_31_XOP_LXSDX         588
+#define OP_31_XOP_LXSSPX        524
+#define OP_31_XOP_LXSIWAX       76
+#define OP_31_XOP_LXSIWZX       12
+
+/* VSX Scalar Store Instructions */
+#define OP_31_XOP_STXSDX        716
+#define OP_31_XOP_STXSSPX       652
+#define OP_31_XOP_STXSIWX       140
+
+/* VSX Vector Load Instructions */
+#define OP_31_XOP_LXVD2X        844
+#define OP_31_XOP_LXVW4X        780
+
+/* VSX Vector Load and Splat Instruction */
+#define OP_31_XOP_LXVDSX        332
+
+/* VSX Vector Store Instructions */
+#define OP_31_XOP_STXVD2X       972
+#define OP_31_XOP_STXVW4X       908
+
+#define OP_31_XOP_LFSX          535
+#define OP_31_XOP_LFSUX         567
+#define OP_31_XOP_LFDX          599
+#define OP_31_XOP_LFDUX		631
+
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX           103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX          231
+
+#define OP_31   31
+#define OP_LWZ  32
+#define OP_LD   58
+#define OP_LWZU 33
+#define OP_LBZ  34
+#define OP_LBZU 35
+#define OP_STW  36
+#define OP_STWU 37
+#define OP_STD  62
+#define OP_STB  38
+#define OP_STBU 39
+#define OP_LHZ  40
+#define OP_LHZU 41
+#define OP_LHA  42
+#define OP_LHAU 43
+#define OP_STH  44
+#define OP_STHU 45
+#define OP_LMW  46
+#define OP_STMW 47
+#define OP_LFS  48
+#define OP_LFSU 49
+#define OP_LFD  50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD  54
+#define OP_STFDU 55
+#define OP_LQ    56
+
+/* sorted alphabetically */
+#define PPC_INST_BHRBE			0x7c00025c
+#define PPC_INST_CLRBHRB		0x7c00035c
+#define PPC_INST_COPY			0x7c20060c
+#define PPC_INST_CP_ABORT		0x7c00068c
+#define PPC_INST_DARN			0x7c0005e6
+#define PPC_INST_DCBA			0x7c0005ec
+#define PPC_INST_DCBA_MASK		0xfc0007fe
+#define PPC_INST_DCBAL			0x7c2005ec
+#define PPC_INST_DCBZL			0x7c2007ec
+#define PPC_INST_ICBT			0x7c00002c
+#define PPC_INST_ICSWX			0x7c00032d
+#define PPC_INST_ICSWEPX		0x7c00076d
+#define PPC_INST_ISEL			0x7c00001e
+#define PPC_INST_ISEL_MASK		0xfc00003e
+#define PPC_INST_LDARX			0x7c0000a8
+#define PPC_INST_STDCX			0x7c0001ad
+#define PPC_INST_LQARX			0x7c000228
+#define PPC_INST_STQCX			0x7c00016d
+#define PPC_INST_LSWI			0x7c0004aa
+#define PPC_INST_LSWX			0x7c00042a
+#define PPC_INST_LWARX			0x7c000028
+#define PPC_INST_STWCX			0x7c00012d
+#define PPC_INST_LWSYNC			0x7c2004ac
+#define PPC_INST_SYNC			0x7c0004ac
+#define PPC_INST_SYNC_MASK		0xfc0007fe
+#define PPC_INST_ISYNC			0x4c00012c
+#define PPC_INST_LXVD2X			0x7c000698
+#define PPC_INST_MCRXR			0x7c000400
+#define PPC_INST_MCRXR_MASK		0xfc0007fe
+#define PPC_INST_MFSPR_PVR		0x7c1f42a6
+#define PPC_INST_MFSPR_PVR_MASK		0xfc1ffffe
+#define PPC_INST_MFTMR			0x7c0002dc
+#define PPC_INST_MSGSND			0x7c00019c
+#define PPC_INST_MSGCLR			0x7c0001dc
+#define PPC_INST_MSGSYNC		0x7c0006ec
+#define PPC_INST_MSGSNDP		0x7c00011c
+#define PPC_INST_MSGCLRP		0x7c00015c
+#define PPC_INST_MTMSRD			0x7c000164
+#define PPC_INST_MTTMR			0x7c0003dc
+#define PPC_INST_NOP			0x60000000
+#define PPC_INST_PASTE			0x7c20070d
+#define PPC_INST_POPCNTB		0x7c0000f4
+#define PPC_INST_POPCNTB_MASK		0xfc0007fe
+#define PPC_INST_POPCNTD		0x7c0003f4
+#define PPC_INST_POPCNTW		0x7c0002f4
+#define PPC_INST_RFEBB			0x4c000124
+#define PPC_INST_RFCI			0x4c000066
+#define PPC_INST_RFDI			0x4c00004e
+#define PPC_INST_RFID			0x4c000024
+#define PPC_INST_RFMCI			0x4c00004c
+#define PPC_INST_MFSPR			0x7c0002a6
+#define PPC_INST_MFSPR_DSCR		0x7c1102a6
+#define PPC_INST_MFSPR_DSCR_MASK	0xfc1ffffe
+#define PPC_INST_MTSPR_DSCR		0x7c1103a6
+#define PPC_INST_MTSPR_DSCR_MASK	0xfc1ffffe
+#define PPC_INST_MFSPR_DSCR_USER	0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK	0xfc1ffffe
+#define PPC_INST_MTSPR_DSCR_USER	0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK	0xfc1ffffe
+#define PPC_INST_MFVSRD			0x7c000066
+#define PPC_INST_MTVSRD			0x7c000166
+#define PPC_INST_SLBFEE			0x7c0007a7
+#define PPC_INST_SLBIA			0x7c0003e4
+
+#define PPC_INST_STRING			0x7c00042a
+#define PPC_INST_STRING_MASK		0xfc0007fe
+#define PPC_INST_STRING_GEN_MASK	0xfc00067e
+
+#define PPC_INST_STSWI			0x7c0005aa
+#define PPC_INST_STSWX			0x7c00052a
+#define PPC_INST_STXVD2X		0x7c000798
+#define PPC_INST_TLBIE			0x7c000264
+#define PPC_INST_TLBIEL			0x7c000224
+#define PPC_INST_TLBILX			0x7c000024
+#define PPC_INST_WAIT			0x7c00007c
+#define PPC_INST_TLBIVAX		0x7c000624
+#define PPC_INST_TLBSRX_DOT		0x7c0006a5
+#define PPC_INST_VPMSUMW		0x10000488
+#define PPC_INST_VPMSUMD		0x100004c8
+#define PPC_INST_VPERMXOR		0x1000002d
+#define PPC_INST_XXLOR			0xf0000490
+#define PPC_INST_XXSWAPD		0xf0000250
+#define PPC_INST_XVCPSGNDP		0xf0000780
+#define PPC_INST_TRECHKPT		0x7c0007dd
+#define PPC_INST_TRECLAIM		0x7c00075d
+#define PPC_INST_TABORT			0x7c00071d
+#define PPC_INST_TSR			0x7c0005dd
+
+#define PPC_INST_NAP			0x4c000364
+#define PPC_INST_SLEEP			0x4c0003a4
+#define PPC_INST_WINKLE			0x4c0003e4
+
+#define PPC_INST_STOP			0x4c0002e4
+
+/* A2 specific instructions */
+#define PPC_INST_ERATWE			0x7c0001a6
+#define PPC_INST_ERATRE			0x7c000166
+#define PPC_INST_ERATILX		0x7c000066
+#define PPC_INST_ERATIVAX		0x7c000666
+#define PPC_INST_ERATSX			0x7c000126
+#define PPC_INST_ERATSX_DOT		0x7c000127
+
+/* Misc instructions for BPF compiler */
+#define PPC_INST_LBZ			0x88000000
+#define PPC_INST_LD			0xe8000000
+#define PPC_INST_LHZ			0xa0000000
+#define PPC_INST_LWZ			0x80000000
+#define PPC_INST_LHBRX			0x7c00062c
+#define PPC_INST_LDBRX			0x7c000428
+#define PPC_INST_STB			0x98000000
+#define PPC_INST_STH			0xb0000000
+#define PPC_INST_STD			0xf8000000
+#define PPC_INST_STDU			0xf8000001
+#define PPC_INST_STW			0x90000000
+#define PPC_INST_STWU			0x94000000
+#define PPC_INST_MFLR			0x7c0802a6
+#define PPC_INST_MTLR			0x7c0803a6
+#define PPC_INST_MTCTR			0x7c0903a6
+#define PPC_INST_CMPWI			0x2c000000
+#define PPC_INST_CMPDI			0x2c200000
+#define PPC_INST_CMPW			0x7c000000
+#define PPC_INST_CMPD			0x7c200000
+#define PPC_INST_CMPLW			0x7c000040
+#define PPC_INST_CMPLD			0x7c200040
+#define PPC_INST_CMPLWI			0x28000000
+#define PPC_INST_CMPLDI			0x28200000
+#define PPC_INST_ADDI			0x38000000
+#define PPC_INST_ADDIS			0x3c000000
+#define PPC_INST_ADD			0x7c000214
+#define PPC_INST_SUB			0x7c000050
+#define PPC_INST_BLR			0x4e800020
+#define PPC_INST_BLRL			0x4e800021
+#define PPC_INST_BCTR			0x4e800420
+#define PPC_INST_MULLD			0x7c0001d2
+#define PPC_INST_MULLW			0x7c0001d6
+#define PPC_INST_MULHWU			0x7c000016
+#define PPC_INST_MULLI			0x1c000000
+#define PPC_INST_DIVWU			0x7c000396
+#define PPC_INST_DIVD			0x7c0003d2
+#define PPC_INST_RLWINM			0x54000000
+#define PPC_INST_RLWIMI			0x50000000
+#define PPC_INST_RLDICL			0x78000000
+#define PPC_INST_RLDICR			0x78000004
+#define PPC_INST_SLW			0x7c000030
+#define PPC_INST_SLD			0x7c000036
+#define PPC_INST_SRW			0x7c000430
+#define PPC_INST_SRD			0x7c000436
+#define PPC_INST_SRAD			0x7c000634
+#define PPC_INST_SRADI			0x7c000674
+#define PPC_INST_AND			0x7c000038
+#define PPC_INST_ANDDOT			0x7c000039
+#define PPC_INST_OR			0x7c000378
+#define PPC_INST_XOR			0x7c000278
+#define PPC_INST_ANDI			0x70000000
+#define PPC_INST_ORI			0x60000000
+#define PPC_INST_ORIS			0x64000000
+#define PPC_INST_XORI			0x68000000
+#define PPC_INST_XORIS			0x6c000000
+#define PPC_INST_NEG			0x7c0000d0
+#define PPC_INST_EXTSW			0x7c0007b4
+#define PPC_INST_BRANCH			0x48000000
+#define PPC_INST_BRANCH_COND		0x40800000
+#define PPC_INST_LBZCIX			0x7c0006aa
+#define PPC_INST_STBCIX			0x7c0007aa
+#define PPC_INST_LWZX			0x7c00002e
+#define PPC_INST_LFSX			0x7c00042e
+#define PPC_INST_STFSX			0x7c00052e
+#define PPC_INST_LFDX			0x7c0004ae
+#define PPC_INST_STFDX			0x7c0005ae
+#define PPC_INST_LVX			0x7c0000ce
+#define PPC_INST_STVX			0x7c0001ce
+#define PPC_INST_VCMPEQUD		0x100000c7
+#define PPC_INST_VCMPEQUB		0x10000006
+
+/* macros to insert fields into opcodes */
+#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
+#define ___PPC_RB(b)	(((b) & 0x1f) << 11)
+#define ___PPC_RS(s)	(((s) & 0x1f) << 21)
+#define ___PPC_RT(t)	___PPC_RS(t)
+#define ___PPC_R(r)	(((r) & 0x1) << 16)
+#define ___PPC_PRS(prs)	(((prs) & 0x1) << 17)
+#define ___PPC_RIC(ric)	(((ric) & 0x3) << 18)
+#define __PPC_RA(a)	___PPC_RA(__REG_##a)
+#define __PPC_RA0(a)	___PPC_RA(__REGA0_##a)
+#define __PPC_RB(b)	___PPC_RB(__REG_##b)
+#define __PPC_RS(s)	___PPC_RS(__REG_##s)
+#define __PPC_RT(t)	___PPC_RT(__REG_##t)
+#define __PPC_XA(a)	((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
+#define __PPC_XB(b)	((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
+#define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
+#define __PPC_XT(s)	__PPC_XS(s)
+#define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
+#define __PPC_WC(w)	(((w) & 0x3) << 21)
+#define __PPC_WS(w)	(((w) & 0x1f) << 11)
+#define __PPC_SH(s)	__PPC_WS(s)
+#define __PPC_SH64(s)	(__PPC_SH(s) | (((s) & 0x20) >> 4))
+#define __PPC_MB(s)	(((s) & 0x1f) << 6)
+#define __PPC_ME(s)	(((s) & 0x1f) << 1)
+#define __PPC_MB64(s)	(__PPC_MB(s) | ((s) & 0x20))
+#define __PPC_ME64(s)	__PPC_MB64(s)
+#define __PPC_BI(s)	(((s) & 0x1f) << 16)
+#define __PPC_CT(t)	(((t) & 0x0f) << 21)
+#define __PPC_SPR(r)	((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
+#define __PPC_RC21	(0x1 << 10)
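+
+/*
+ * Worked example (for illustration): the field macros shift register
+ * numbers into the RT, RA and RB fields of an instruction image, so
+ *	PPC_INST_LWARX | ___PPC_RT(10) | ___PPC_RA(3) | ___PPC_RB(4)
+ * is 0x7c000028 | (10 << 21) | (3 << 16) | (4 << 11) = 0x7d432028,
+ * i.e. "lwarx r10,r3,r4".
+ */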
+
+/*
+ * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
+ * larx with EH set as an illegal instruction.
+ */
+#ifdef CONFIG_PPC64
+#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
+#else
+#define __PPC_EH(eh)	0
+#endif
+
+/* Deal with instructions that older assemblers aren't aware of */
+#define	PPC_CP_ABORT		stringify_in_c(.long PPC_INST_CP_ABORT)
+#define	PPC_COPY(a, b)		stringify_in_c(.long PPC_INST_COPY | \
+					___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_DARN(t, l)		stringify_in_c(.long PPC_INST_DARN |  \
+						___PPC_RT(t)	   |  \
+						(((l) & 0x3) << 16))
+#define	PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
+					__PPC_RA(a) | __PPC_RB(b))
+#define	PPC_DCBZL(a, b)		stringify_in_c(.long PPC_INST_DCBZL | \
+					__PPC_RA(a) | __PPC_RB(b))
+#define PPC_LQARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LQARX | \
+					___PPC_RT(t) | ___PPC_RA(a) | \
+					___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
+					___PPC_RT(t) | ___PPC_RA(a) | \
+					___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
+					___PPC_RT(t) | ___PPC_RA(a) | \
+					___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_STQCX(t, a, b)	stringify_in_c(.long PPC_INST_STQCX | \
+					___PPC_RT(t) | ___PPC_RA(a) | \
+					___PPC_RB(b))
+#define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
+					___PPC_RB(b))
+#define PPC_MSGSYNC		stringify_in_c(.long PPC_INST_MSGSYNC)
+#define PPC_MSGCLR(b)		stringify_in_c(.long PPC_INST_MSGCLR | \
+					___PPC_RB(b))
+#define PPC_MSGSNDP(b)		stringify_in_c(.long PPC_INST_MSGSNDP | \
+					___PPC_RB(b))
+#define PPC_MSGCLRP(b)		stringify_in_c(.long PPC_INST_MSGCLRP | \
+					___PPC_RB(b))
+#define PPC_PASTE(a, b)		stringify_in_c(.long PPC_INST_PASTE | \
+					___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s)	stringify_in_c(.long PPC_INST_POPCNTW | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
+#define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
+#define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
+#define PPC_TLBILX(t, a, b)	stringify_in_c(.long PPC_INST_TLBILX | \
+					__PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_TLBILX_ALL(a, b)	PPC_TLBILX(0, a, b)
+#define PPC_TLBILX_PID(a, b)	PPC_TLBILX(1, a, b)
+#define PPC_TLBILX_VA(a, b)	PPC_TLBILX(3, a, b)
+#define PPC_WAIT(w)		stringify_in_c(.long PPC_INST_WAIT | \
+					__PPC_WC(w))
+#define PPC_TLBIE(lp,a) 	stringify_in_c(.long PPC_INST_TLBIE | \
+					       ___PPC_RB(a) | ___PPC_RS(lp))
+#define	PPC_TLBIE_5(rb,rs,ric,prs,r) \
+				stringify_in_c(.long PPC_INST_TLBIE | \
+					___PPC_RB(rb) | ___PPC_RS(rs) | \
+					___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+					___PPC_R(r))
+#define	PPC_TLBIEL(rb,rs,ric,prs,r) \
+				stringify_in_c(.long PPC_INST_TLBIEL | \
+					___PPC_RB(rb) | ___PPC_RS(rs) | \
+					___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+					___PPC_R(r))
+#define PPC_TLBSRX_DOT(a,b)	stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
+					__PPC_RA0(a) | __PPC_RB(b))
+#define PPC_TLBIVAX(a,b)	stringify_in_c(.long PPC_INST_TLBIVAX | \
+					__PPC_RA0(a) | __PPC_RB(b))
+
+#define PPC_ERATWE(s, a, w)	stringify_in_c(.long PPC_INST_ERATWE | \
+					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATRE(s, a, w)	stringify_in_c(.long PPC_INST_ERATRE | \
+					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATILX(t, a, b)	stringify_in_c(.long PPC_INST_ERATILX | \
+					__PPC_T_TLB(t) | __PPC_RA0(a) | \
+					__PPC_RB(b))
+#define PPC_ERATIVAX(s, a, b)	stringify_in_c(.long PPC_INST_ERATIVAX | \
+					__PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_ERATSX(t, a, b)	stringify_in_c(.long PPC_INST_ERATSX | \
+					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_ERATSX_DOT(t, a, b)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
+					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
+					__PPC_RT(t) | __PPC_RB(b))
+#define PPC_ICBT(c,a,b)		stringify_in_c(.long PPC_INST_ICBT | \
+				       __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
+/* PASemi instructions */
+#define LBZCIX(t,a,b)		stringify_in_c(.long PPC_INST_LBZCIX | \
+				       __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
+#define STBCIX(s,a,b)		stringify_in_c(.long PPC_INST_STBCIX | \
+				       __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
+
+/*
+ * Define what the VSX XX1 form instructions will look like, then add
+ * the 128 bit load store instructions based on that.
+ */
+#define VSX_XX1(s, a, b)	(__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define VSX_XX3(t, a, b)	(__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
+#define STXVD2X(s, a, b)	stringify_in_c(.long PPC_INST_STXVD2X | \
+					       VSX_XX1((s), a, b))
+#define LXVD2X(s, a, b)		stringify_in_c(.long PPC_INST_LXVD2X | \
+					       VSX_XX1((s), a, b))
+#define MFVRD(a, t)		stringify_in_c(.long PPC_INST_MFVSRD | \
+					       VSX_XX1((t)+32, a, R0))
+#define MTVRD(t, a)		stringify_in_c(.long PPC_INST_MTVSRD | \
+					       VSX_XX1((t)+32, a, R0))
+#define VPMSUMW(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMW | \
+					       VSX_XX3((t), a, b))
+#define VPMSUMD(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMD | \
+					       VSX_XX3((t), a, b))
+#define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \
+					       VSX_XX3((t), a, b))
+#define XXSWAPD(t, a)		stringify_in_c(.long PPC_INST_XXSWAPD | \
+					       VSX_XX3((t), a, a))
+#define XVCPSGNDP(t, a, b)	stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
+					       VSX_XX3((t), (a), (b))))
+
+#define VPERMXOR(vrt, vra, vrb, vrc)				\
+	stringify_in_c(.long (PPC_INST_VPERMXOR |		\
+			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+			      ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6)))
+
+#define PPC_NAP			stringify_in_c(.long PPC_INST_NAP)
+#define PPC_SLEEP		stringify_in_c(.long PPC_INST_SLEEP)
+#define PPC_WINKLE		stringify_in_c(.long PPC_INST_WINKLE)
+
+#define PPC_STOP		stringify_in_c(.long PPC_INST_STOP)
+
+/* BHRB instructions */
+#define PPC_CLRBHRB		stringify_in_c(.long PPC_INST_CLRBHRB)
+#define PPC_MFBHRBE(r, n)	stringify_in_c(.long PPC_INST_BHRBE | \
+						__PPC_RT(r) | \
+							(((n) & 0x3ff) << 11))
+
+/* Transactional memory instructions */
+#define TRECHKPT		stringify_in_c(.long PPC_INST_TRECHKPT)
+#define TRECLAIM(r)		stringify_in_c(.long PPC_INST_TRECLAIM \
+					       | __PPC_RA(r))
+#define TABORT(r)		stringify_in_c(.long PPC_INST_TABORT \
+					       | __PPC_RA(r))
+
+/* book3e thread control instructions */
+#define TMRN(x)			((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
+#define MTTMR(tmr, r)		stringify_in_c(.long PPC_INST_MTTMR | \
+					       TMRN(tmr) | ___PPC_RS(r))
+#define MFTMR(tmr, r)		stringify_in_c(.long PPC_INST_MFTMR | \
+					       TMRN(tmr) | ___PPC_RT(r))
+
+/* Coprocessor instructions */
+#define PPC_ICSWX(s, a, b)	stringify_in_c(.long PPC_INST_ICSWX |	\
+					       ___PPC_RS(s) |		\
+					       ___PPC_RA(a) |		\
+					       ___PPC_RB(b))
+#define PPC_ICSWEPX(s, a, b)	stringify_in_c(.long PPC_INST_ICSWEPX | \
+					       ___PPC_RS(s) |		\
+					       ___PPC_RA(a) |		\
+					       ___PPC_RB(b))
+
+#define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
+				       ((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
+
+#define VCMPEQUD_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUD | \
+			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+			      ___PPC_RB(vrb) | __PPC_RC21)
+
+#define VCMPEQUB_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUB | \
+			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+			      ___PPC_RB(vrb) | __PPC_RC21)
+
+#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
new file mode 100644
index 0000000..7262880
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -0,0 +1,85 @@
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+extern unsigned long isa_io_base;
+
+extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
+extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
+
+
+extern struct list_head hose_list;
+
+extern struct pci_dev *isa_bridge_pcidev;	/* may be NULL if no ISA bus */
+
+/* Bus Unit ID macros; get the low and high 32 bits of the 64-bit BUID */
+#define BUID_HI(buid) upper_32_bits(buid)
+#define BUID_LO(buid) lower_32_bits(buid)
+
+/* PCI device_node operations */
+struct device_node;
+struct pci_dn;
+
+void *pci_traverse_device_nodes(struct device_node *start,
+				void *(*fn)(struct device_node *, void *),
+				void *data);
+void *traverse_pci_dn(struct pci_dn *root,
+		      void *(*fn)(struct pci_dn *, void *),
+		      void *data);
+extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+
+/* From rtas_pci.h */
+extern void init_pci_config_tokens (void);
+extern unsigned long get_phb_buid (struct device_node *);
+extern int rtas_setup_phb(struct pci_controller *phb);
+
+#ifdef CONFIG_EEH
+
+void eeh_addr_cache_insert_dev(struct pci_dev *dev);
+void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
+struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
+void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
+int eeh_pci_enable(struct eeh_pe *pe, int function);
+int eeh_pe_reset_full(struct eeh_pe *pe);
+void eeh_save_bars(struct eeh_dev *edev);
+int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
+void eeh_pe_state_mark(struct eeh_pe *pe, int state);
+void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
+void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
+
+void eeh_sysfs_add_device(struct pci_dev *pdev);
+void eeh_sysfs_remove_device(struct pci_dev *pdev);
+
+static inline const char *eeh_pci_name(struct pci_dev *pdev)
+{
+	return pdev ? pci_name(pdev) : "<null>";
+}
+
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+	return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
+#endif /* CONFIG_EEH */
+
+#else /* CONFIG_PCI */
+static inline void init_pci_config_tokens(void) { }
+#endif /* !CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/arch/powerpc/include/asm/ppc4xx.h b/arch/powerpc/include/asm/ppc4xx.h
new file mode 100644
index 0000000..610a511
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx.h
@@ -0,0 +1,18 @@
+/*
+ * PPC4xx Prototypes and definitions
+ *
+ * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_PPC4xx_H__
+#define __ASM_POWERPC_PPC4xx_H__
+
+extern void __noreturn ppc4xx_reset_system(char *cmd);
+
+#endif /* __ASM_POWERPC_PPC4xx_H__ */
diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h
new file mode 100644
index 0000000..6ce9046
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx_ocm.h
@@ -0,0 +1,45 @@
+/*
+ * PowerPC 4xx OCM memory allocation support
+ *
+ * (C) Copyright 2009, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@amcc.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_POWERPC_PPC4XX_OCM_H__
+#define __ASM_POWERPC_PPC4XX_OCM_H__
+
+#define PPC4XX_OCM_NON_CACHED 0
+#define PPC4XX_OCM_CACHED     1
+
+#if defined(CONFIG_PPC4xx_OCM)
+
+void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
+		  int flags, const char *owner);
+void ppc4xx_ocm_free(const void *virt);
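+
+/*
+ * Usage sketch (hypothetical driver, not from the original file):
+ *
+ *	phys_addr_t phys;
+ *	void *virt = ppc4xx_ocm_alloc(&phys, 1024, 4,
+ *				      PPC4XX_OCM_NON_CACHED, "my-driver");
+ *	if (virt) {
+ *		...
+ *		ppc4xx_ocm_free(virt);
+ *	}
+ */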
+
+#else
+
+#define ppc4xx_ocm_alloc(phys, size, align, flags, owner)	NULL
+#define ppc4xx_ocm_free(addr)	((void)0)
+
+#endif /* CONFIG_PPC4xx_OCM */
+
+#endif  /* __ASM_POWERPC_PPC4XX_OCM_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
new file mode 100644
index 0000000..b5d0236
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ */
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
+
+#include <linux/stringify.h>
+#include <asm/asm-compat.h>
+#include <asm/processor.h>
+#include <asm/ppc-opcode.h>
+#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
+
+#ifdef __ASSEMBLY__
+
+#define SZL			(BITS_PER_LONG/8)
+
+/*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
+#define ACCOUNT_STOLEN_TIME
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
+	MFTB(ra);			/* get timebase */		\
+	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
+	PPC_STL	ra, ACCOUNT_STARTTIME(ptr);				\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	PPC_LL	ra, ACCOUNT_USER_TIME(ptr);				\
+	add	ra,ra,rb;		/* add on to user time */	\
+	PPC_STL	ra, ACCOUNT_USER_TIME(ptr);				\
+
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
+	MFTB(ra);			/* get timebase */		\
+	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
+	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr);			\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
+	add	ra,ra,rb;		/* add on to system time */	\
+	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)
+
+#ifdef CONFIG_PPC_SPLPAR
+#define ACCOUNT_STOLEN_TIME						\
+BEGIN_FW_FTR_SECTION;							\
+	beq	33f;							\
+	/* from user - see if there are any DTL entries to process */	\
+	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
+	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
+	addi	r10,r10,LPPACA_DTLIDX;					\
+	LDX_BE	r10,0,r10;		/* get log write index */	\
+	cmpd	cr1,r11,r10;						\
+	beq+	cr1,33f;						\
+	bl	accumulate_stolen_time;				\
+	ld	r12,_MSR(r1);						\
+	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
+33:									\
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
+
+#else  /* CONFIG_PPC_SPLPAR */
+#define ACCOUNT_STOLEN_TIME
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#ifdef __powerpc64__
+#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
+#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
+#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
+#else
+#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
+#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
+#define SAVE_NVGPRS(base)	stmw	13, GPR0+4*13(base)
+#define REST_NVGPRS(base)	lmw	13, GPR0+4*13(base)
+#endif
+
+#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
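+
+/*
+ * For example, on 64-bit, SAVE_2GPRS(30, r1) expands to
+ *	std 30,GPR0+8*30(r1); std 31,GPR0+8*31(r1)
+ * and the wider variants simply chain the narrower ones.
+ */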
+
+#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
+#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
+#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+#define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b
+#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
+#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
+#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
+					STXVD2X(n,b,base);	\
+					XXSWAPD(n,n)
+
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
+					XXSWAPD(n,n)
+#endif
+/* Save the lower 32 VSRs in the thread VSR region */
+#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
+#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
+#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
+#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
+#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
+#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
+#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
+#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
+#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
+#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
+#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
+
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
+
+/* Macros to adjust thread priority for hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority
+#define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW  or	6,6,6		# medium low priority
+#define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH or	5,5,5		# medium high priority
+#define HMT_HIGH	or	3,3,3
+#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
+
+#ifdef CONFIG_PPC64
+#define ULONG_SIZE 	8
+#else
+#define ULONG_SIZE	4
+#endif
+#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
+#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)
+
+#ifdef __KERNEL__
+#ifdef CONFIG_PPC64
+
+#define STACKFRAMESIZE 256
+#define __STK_REG(i)   (112 + ((i)-14)*8)
+#define STK_REG(i)     __STK_REG(__REG_##i)
+
+#ifdef PPC64_ELF_ABI_v2
+#define STK_GOT		24
+#define __STK_PARAM(i)	(32 + ((i)-3)*8)
+#else
+#define STK_GOT		40
+#define __STK_PARAM(i)	(48 + ((i)-3)*8)
+#endif
+#define STK_PARAM(i)	__STK_PARAM(__REG_##i)
+
+#ifdef PPC64_ELF_ABI_v2
+
+#define _GLOBAL(name) \
+	.align 2 ; \
+	.type name,@function; \
+	.globl name; \
+name:
+
+#define _GLOBAL_TOC(name) \
+	.align 2 ; \
+	.type name,@function; \
+	.globl name; \
+name: \
+0:	addis r2,r12,(.TOC.-0b)@ha; \
+	addi r2,r2,(.TOC.-0b)@l; \
+	.localentry name,.-name
+
+#define DOTSYM(a)	a
+
+#else
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+	.align 2 ; \
+	.globl name; \
+	.globl GLUE(.,name); \
+	.pushsection ".opd","aw"; \
+name: \
+	.quad GLUE(.,name); \
+	.quad .TOC.@tocbase; \
+	.quad 0; \
+	.popsection; \
+	.type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define DOTSYM(a)	GLUE(.,a)
+
+#endif
+
+#else /* 32-bit */
+
+#define _ENTRY(n)	\
+	.globl n;	\
+n:
+
+#define _GLOBAL(n)	\
+	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
+	.globl n;	\
+n:
+
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#endif
+
+/*
+ * __kprobes (the C annotation) puts the symbol into the .kprobes.text
+ * section, which gets emitted at the end of regular text.
+ *
+ * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
+ * a blacklist. The former is for core kprobe functions/data, the
+ * latter is for those that incidentally must be excluded from probing
+ * and allows them to be linked at a more optimal location within text.
+ */
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE_SYMBOL(entry)			\
+	.pushsection "_kprobe_blacklist","aw";		\
+	PPC_LONG (entry) ;				\
+	.popsection
+#else
+#define _ASM_NOKPROBE_SYMBOL(entry)
+#endif
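+
+/*
+ * Typical use in assembly (illustrative; the symbol name is hypothetical):
+ *
+ *	_GLOBAL(my_lowlevel_helper)
+ *	...
+ *	_ASM_NOKPROBE_SYMBOL(my_lowlevel_helper)
+ */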
+
+#define FUNC_START(name)	_GLOBAL(name)
+#define FUNC_END(name)
+
+/* 
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ *   Loads the value of the constant expression 'expr' into register 'rn'
+ *   using immediate instructions only.  Use this when it's important not
+ *   to reference other data (i.e. on ppc64 when the TOC pointer is not
+ *   valid) and when 'expr' is a constant or absolute address.
+ *
+ * LOAD_REG_ADDR(rn, name)
+ *   Loads the address of label 'name' into register 'rn'.  Use this when
+ *   you don't particularly need immediate instructions only, but you need
+ *   the whole address in one register (e.g. it's a structure address and
+ *   you want to access various offsets within it).  On ppc32 this is
+ *   identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDR_PIC(rn, name)
+ *   Loads the address of label 'name' into register 'rn'. Use this when
+ *   the kernel doesn't run at the linked or relocated address. Please
+ *   note that this macro will clobber the lr register.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
+ *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
+ *   in size, so is suitable for use directly as an offset in load and store
+ *   instructions.  Use this when loading/storing a single word or less as:
+ *      LOAD_REG_ADDRBASE(rX, name)
+ *      ld	rY,ADDROFF(name)(rX)
+ */
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name)		\
+	bl	0f;				\
+0:	mflr	reg;				\
+	addis	reg,reg,(name - 0b)@ha;		\
+	addi	reg,reg,(name - 0b)@l;
+
+#ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
+#define LOAD_REG_IMMEDIATE(reg,expr)		\
+	lis     reg,(expr)@highest;		\
+	ori     reg,reg,(expr)@higher;	\
+	rldicr  reg,reg,32,31;		\
+	oris    reg,reg,(expr)@__AS_ATHIGH;	\
+	ori     reg,reg,(expr)@l;
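+
+/*
+ * For example, LOAD_REG_IMMEDIATE(r5, 0x1122334455667788) builds the
+ * constant 16 bits at a time: lis loads 0x1122, ori adds 0x3344,
+ * rldicr shifts the pair into the upper word, and the final oris/ori
+ * supply 0x5566 and 0x7788.
+ */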
+
+#define LOAD_REG_ADDR(reg,name)			\
+	ld	reg,name@got(r2)
+
+#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name)			0
+
+/* offsets for stack frame layout */
+#define LRSAVE	16
+
+#else /* 32-bit */
+
+#define LOAD_REG_IMMEDIATE(reg,expr)		\
+	lis	reg,(expr)@ha;		\
+	addi	reg,reg,(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)
+
+#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
+#define ADDROFF(name)			name@l
+
+/* offsets for stack frame layout */
+#define LRSAVE	4
+
+#endif
+
+/* various errata or part fixups */
+#ifdef CONFIG_PPC601_SYNC_FIX
+#define SYNC				\
+BEGIN_FTR_SECTION			\
+	sync;				\
+	isync;				\
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define SYNC_601			\
+BEGIN_FTR_SECTION			\
+	sync;				\
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define ISYNC_601			\
+BEGIN_FTR_SECTION			\
+	isync;				\
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#else
+#define	SYNC
+#define SYNC_601
+#define ISYNC_601
+#endif
+
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+#define MFTB(dest)			\
+90:	mfspr dest, SPRN_TBRL;		\
+BEGIN_FTR_SECTION_NESTED(96);		\
+	cmpwi dest,0;			\
+	beq-  90b;			\
+END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#else
+#define MFTB(dest)			MFTBL(dest)
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#define MFTBL(dest)			mftb dest
+#define MFTBU(dest)			mftbu dest
+#else
+#define MFTBL(dest)			mfspr dest, SPRN_TBRL
+#define MFTBU(dest)			mfspr dest, SPRN_TBRU
+#endif
+
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else /* CONFIG_SMP */
+/* tlbsync is not implemented on 601 */
+#define TLBSYNC				\
+BEGIN_FTR_SECTION			\
+	tlbsync;			\
+	sync;				\
+END_FTR_SECTION_IFCLR(CPU_FTR_601)
+#endif
+
+#ifdef CONFIG_PPC64
+#define MTOCRF(FXM, RS)			\
+	BEGIN_FTR_SECTION_NESTED(848);	\
+	mtcrf	(FXM), RS;		\
+	FTR_SECTION_ELSE_NESTED(848);	\
+	mtocrf (FXM), RS;		\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+#endif
+
+/*
+ * This instruction is not implemented on the PPC 603 or 601; however, on
+ * the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx, they have magical powers,
+ * and they must be used.
+ */
+
+#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
+#define tlbia					\
+	li	r4,1024;			\
+	mtctr	r4;				\
+	lis	r4,KERNELBASE@h;		\
+	.machine push;				\
+	.machine "power4";			\
+0:	tlbie	r4;				\
+	.machine pop;				\
+	addi	r4,r4,0x1000;			\
+	bdnz	0b
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+/* The following stops all load and store data streams associated with stream
+ * ID (i.e. streams created explicitly).  The embedded and server mnemonics for
+ * dcbt are different so this must only be used for server.
+ */
+#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)	\
+       lis     scratch,0x60000000@h;			\
+       dcbt    0,scratch,0b01010
+
+/*
+ * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
+ * keep the address intact to be compatible with code shared with
+ * 32-bit classic.
+ *
+ * On the other hand, I find it useful to have them behave as expected
+ * by their name (i.e. always do the addition) on 64-bit BookE.
+ */
+#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
+#define toreal(rd)
+#define fromreal(rd)
+
+/*
+ * We use addis to ensure compatibility with the "classic" ppc versions of
+ * these macros, which use rs = 0 to get the tophys offset in rd, rather than
+ * converting the address in r0, and so this version has to do that too
+ * (i.e. set register rd to 0 when rs == 0).
+ */
+#define tophys(rd,rs)				\
+	addis	rd,rs,0
+
+#define tovirt(rd,rs)				\
+	addis	rd,rs,0
+
+#elif defined(CONFIG_PPC64)
+#define toreal(rd)		/* we can access c000... in real mode */
+#define fromreal(rd)
+
+#define tophys(rd,rs)                           \
+	clrldi	rd,rs,2
+
+#define tovirt(rd,rs)                           \
+	rotldi	rd,rs,16;			\
+	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
+	rotldi	rd,rd,48
+#else
+/*
+ * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
+ * physical base address of RAM at compile time.
+ */
+#define toreal(rd)	tophys(rd,rd)
+#define fromreal(rd)	tovirt(rd,rd)
+
+#define tophys(rd,rs)				\
+0:	addis	rd,rs,-PAGE_OFFSET@h;		\
+	.section ".vtop_fixup","aw";		\
+	.align  1;				\
+	.long   0b;				\
+	.previous
+
+#define tovirt(rd,rs)				\
+0:	addis	rd,rs,PAGE_OFFSET@h;		\
+	.section ".ptov_fixup","aw";		\
+	.align  1;				\
+	.long   0b;				\
+	.previous
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define RFI		rfid
+#define MTMSRD(r)	mtmsrd	r
+#define MTMSR_EERI(reg)	mtmsrd	reg,1
+#else
+#ifndef CONFIG_40x
+#define	RFI		rfi
+#else
+#define RFI		rfi; b .	/* Prevent prefetch past rfi */
+#endif
+#define MTMSRD(r)	mtmsr	r
+#define MTMSR_EERI(reg)	mtmsr	reg
+#endif
+
+#endif /* __KERNEL__ */
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define	cr0	0
+#define	cr1	1
+#define	cr2	2
+#define	cr3	3
+#define	cr4	4
+#define	cr5	5
+#define	cr6	6
+#define	cr7	7
+
+
+/*
+ * General Purpose Registers (GPRs)
+ *
+ * The lower case r0-r31 should be used in preference to the upper
+ * case R0-R31 as they provide more error checking in the assembler.
+ * Use R0-R31 only when really necessary.
+ */
+
+#define	r0	%r0
+#define	r1	%r1
+#define	r2	%r2
+#define	r3	%r3
+#define	r4	%r4
+#define	r5	%r5
+#define	r6	%r6
+#define	r7	%r7
+#define	r8	%r8
+#define	r9	%r9
+#define	r10	%r10
+#define	r11	%r11
+#define	r12	%r12
+#define	r13	%r13
+#define	r14	%r14
+#define	r15	%r15
+#define	r16	%r16
+#define	r17	%r17
+#define	r18	%r18
+#define	r19	%r19
+#define	r20	%r20
+#define	r21	%r21
+#define	r22	%r22
+#define	r23	%r23
+#define	r24	%r24
+#define	r25	%r25
+#define	r26	%r26
+#define	r27	%r27
+#define	r28	%r28
+#define	r29	%r29
+#define	r30	%r30
+#define	r31	%r31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define	fr0	0
+#define	fr1	1
+#define	fr2	2
+#define	fr3	3
+#define	fr4	4
+#define	fr5	5
+#define	fr6	6
+#define	fr7	7
+#define	fr8	8
+#define	fr9	9
+#define	fr10	10
+#define	fr11	11
+#define	fr12	12
+#define	fr13	13
+#define	fr14	14
+#define	fr15	15
+#define	fr16	16
+#define	fr17	17
+#define	fr18	18
+#define	fr19	19
+#define	fr20	20
+#define	fr21	21
+#define	fr22	22
+#define	fr23	23
+#define	fr24	24
+#define	fr25	25
+#define	fr26	26
+#define	fr27	27
+#define	fr28	28
+#define	fr29	29
+#define	fr30	30
+#define	fr31	31
+
+/* AltiVec Registers (VPRs) */
+
+#define	v0	0
+#define	v1	1
+#define	v2	2
+#define	v3	3
+#define	v4	4
+#define	v5	5
+#define	v6	6
+#define	v7	7
+#define	v8	8
+#define	v9	9
+#define	v10	10
+#define	v11	11
+#define	v12	12
+#define	v13	13
+#define	v14	14
+#define	v15	15
+#define	v16	16
+#define	v17	17
+#define	v18	18
+#define	v19	19
+#define	v20	20
+#define	v21	21
+#define	v22	22
+#define	v23	23
+#define	v24	24
+#define	v25	25
+#define	v26	26
+#define	v27	27
+#define	v28	28
+#define	v29	29
+#define	v30	30
+#define	v31	31
+
+/* VSX Registers (VSRs) */
+
+#define	vs0	0
+#define	vs1	1
+#define	vs2	2
+#define	vs3	3
+#define	vs4	4
+#define	vs5	5
+#define	vs6	6
+#define	vs7	7
+#define	vs8	8
+#define	vs9	9
+#define	vs10	10
+#define	vs11	11
+#define	vs12	12
+#define	vs13	13
+#define	vs14	14
+#define	vs15	15
+#define	vs16	16
+#define	vs17	17
+#define	vs18	18
+#define	vs19	19
+#define	vs20	20
+#define	vs21	21
+#define	vs22	22
+#define	vs23	23
+#define	vs24	24
+#define	vs25	25
+#define	vs26	26
+#define	vs27	27
+#define	vs28	28
+#define	vs29	29
+#define	vs30	30
+#define	vs31	31
+#define	vs32	32
+#define	vs33	33
+#define	vs34	34
+#define	vs35	35
+#define	vs36	36
+#define	vs37	37
+#define	vs38	38
+#define	vs39	39
+#define	vs40	40
+#define	vs41	41
+#define	vs42	42
+#define	vs43	43
+#define	vs44	44
+#define	vs45	45
+#define	vs46	46
+#define	vs47	47
+#define	vs48	48
+#define	vs49	49
+#define	vs50	50
+#define	vs51	51
+#define	vs52	52
+#define	vs53	53
+#define	vs54	54
+#define	vs55	55
+#define	vs56	56
+#define	vs57	57
+#define	vs58	58
+#define	vs59	59
+#define	vs60	60
+#define	vs61	61
+#define	vs62	62
+#define	vs63	63
+
+/* SPE Registers (EVPRs) */
+
+#define	evr0	0
+#define	evr1	1
+#define	evr2	2
+#define	evr3	3
+#define	evr4	4
+#define	evr5	5
+#define	evr6	6
+#define	evr7	7
+#define	evr8	8
+#define	evr9	9
+#define	evr10	10
+#define	evr11	11
+#define	evr12	12
+#define	evr13	13
+#define	evr14	14
+#define	evr15	15
+#define	evr16	16
+#define	evr17	17
+#define	evr18	18
+#define	evr19	19
+#define	evr20	20
+#define	evr21	21
+#define	evr22	22
+#define	evr23	23
+#define	evr24	24
+#define	evr25	25
+#define	evr26	26
+#define	evr27	27
+#define	evr28	28
+#define	evr29	29
+#define	evr30	30
+#define	evr31	31
+
+/* some stab codes */
+#define N_FUN	36
+#define N_RSYM	64
+#define N_SLINE	68
+#define N_SO	100
+
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * The opcode for this instruction read with the wrong endian
+ * however results in a b . + 8
+ *
+ * So essentially we use that trick to execute the following
+ * trampoline in "reverse endian" if we are running with the
+ * MSR_LE bit set the "wrong" way for whatever endianness the
+ * kernel is built for.
+ */
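+/*
+ * Worked example (for reference): "tdi 0,0,0x48" assembles to
+ * 0x08000048; the same word read with reversed byte order is
+ * 0x48000008, which is exactly "b . + 8", so a wrong-endian CPU
+ * branches straight into the byte-swapped instruction sequence below.
+ */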
+
+#ifdef CONFIG_PPC_BOOK3E
+#define FIXUP_ENDIAN
+#else
+/*
+ * This version may be used in HV or non-HV context.
+ * MSR[EE] must be disabled.
+ */
+#define FIXUP_ENDIAN						   \
+	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
+	b     191f;	  /* Skip trampoline if endian is good	*/ \
+	.long 0xa600607d; /* mfmsr r11				*/ \
+	.long 0x01006b69; /* xori r11,r11,1			*/ \
+	.long 0x00004039; /* li r10,0				*/ \
+	.long 0x6401417d; /* mtmsrd r10,1			*/ \
+	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
+	.long 0xa602487d; /* mflr r10				*/ \
+	.long 0x14004a39; /* addi r10,r10,20			*/ \
+	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
+	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
+	.long 0x2400004c; /* rfid				*/ \
+191:
+
+/*
+ * This version may only be used with MSR[HV]=1.
+ * - Does not clear MSR[RI], so more robust.
+ * - Slightly smaller and faster.
+ */
+#define FIXUP_ENDIAN_HV						   \
+	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
+	b     191f;	  /* Skip trampoline if endian is good	*/ \
+	.long 0xa600607d; /* mfmsr r11				*/ \
+	.long 0x01006b69; /* xori r11,r11,1			*/ \
+	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
+	.long 0xa602487d; /* mflr r10				*/ \
+	.long 0x14004a39; /* addi r10,r10,20			*/ \
+	.long 0xa64b5a7d; /* mthsrr0 r10			*/ \
+	.long 0xa64b7b7d; /* mthsrr1 r11			*/ \
+	.long 0x2402004c; /* hrfid				*/ \
+191:
+
+#endif /* !CONFIG_PPC_BOOK3E */
+
+#endif /*  __ASSEMBLY__ */
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target)		\
+	stringify_in_c(.section __ex_table,"a";)\
+	stringify_in_c(.balign 4;)		\
+	stringify_in_c(.long (_fault) - . ;)	\
+	stringify_in_c(.long (_target) - . ;)	\
+	stringify_in_c(.previous)
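+/*
+ * Hedged usage sketch (hypothetical call site, not part of this
+ * header): a fixup is registered by pairing a label on a potentially
+ * faulting instruction with a label on its recovery code, e.g.
+ *
+ *	1:	lwz	r9,0(r3)	# may fault
+ *	...
+ *	2:	li	r9,0		# fixup: result is 0 on fault
+ *	EX_TABLE(1b, 2b)
+ *
+ * Both entries are emitted as 32-bit self-relative offsets, so the
+ * __ex_table section needs no relocation at load time.
+ */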
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
new file mode 100644
index 0000000..3421637
--- /dev/null
+++ b/arch/powerpc/include/asm/probes.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_POWERPC_PROBES_H
+#define _ASM_POWERPC_PROBES_H
+#ifdef __KERNEL__
+/*
+ * Definitions common to probes files
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2012
+ */
+#include <linux/types.h>
+
+typedef u32 ppc_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x7fe00008	/* trap */
+
+/* Trap definitions per ISA */
+#define IS_TW(instr)		(((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr)		(((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr)		(((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr)		(((instr) & 0xfc000000) == 0x0c000000)
+
+#ifdef CONFIG_PPC64
+#define is_trap(instr)		(IS_TW(instr) || IS_TD(instr) || \
+				IS_TWI(instr) || IS_TDI(instr))
+#else
+#define is_trap(instr)		(IS_TW(instr) || IS_TWI(instr))
+#endif /* CONFIG_PPC64 */
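+/*
+ * Illustrative sketch (hypothetical caller, not part of this header):
+ * probe code typically reads the instruction at the target address and
+ * refuses to place a probe on an existing trap:
+ *
+ *	ppc_opcode_t insn = *(ppc_opcode_t *)addr;
+ *
+ *	if (is_trap(insn))
+ *		return -EINVAL;
+ */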
+
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#define MSR_SINGLESTEP	(MSR_DE)
+#else
+#define MSR_SINGLESTEP	(MSR_SE)
+#endif
+
+/* Enable single stepping for the current task */
+static inline void enable_single_step(struct pt_regs *regs)
+{
+	regs->msr |= MSR_SINGLESTEP;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	/*
+	 * We turn off the Critical Input Exception (CE) to ensure that the
+	 * single step will be for the instruction we have the probe on; if
+	 * we don't, it is possible we'd get the single step reported for CE.
+	 */
+	regs->msr &= ~MSR_CE;
+	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+	isync();
+#endif
+#endif
+}
+
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_PROBES_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
new file mode 100644
index 0000000..52fadde
--- /dev/null
+++ b/arch/powerpc/include/asm/processor.h
@@ -0,0 +1,541 @@
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/reg.h>
+
+#ifdef CONFIG_VSX
+#define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
+#else
+#define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
+#endif
+
+#ifdef CONFIG_PPC64
+/* Default SMT priority is set to 3. Use bits 11-13 to save the priority. */
+#define PPR_PRIORITY 3
+#ifdef __ASSEMBLY__
+#define INIT_PPR (PPR_PRIORITY << 50)
+#else
+#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+#include <asm/hw_breakpoint.h>
+
+/* We do _not_ want to define new machine types at all, those must die
+ * in favor of using the device-tree
+ * -- BenH.
+ */
+
+/* PREP sub-platform types. Unused */
+#define _PREP_Motorola	0x01	/* motorola prep */
+#define _PREP_Firm	0x02	/* firmworks prep */
+#define _PREP_IBM	0x00	/* ibm prep */
+#define _PREP_Bull	0x03	/* bull prep */
+
+/* CHRP sub-platform types. These are arbitrary */
+#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
+#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
+#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
+#define _CHRP_briq	0x07	/* TotalImpact's briQ */
+
+#if defined(__KERNEL__) && defined(CONFIG_PPC32)
+
+extern int _chrp_type;
+
+#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()   asm volatile("or 31,31,31   # very low priority")
+#define HMT_low()	 asm volatile("or 1,1,1	     # low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
+#define HMT_medium()	 asm volatile("or 2,2,2	     # medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
+#define HMT_high()	 asm volatile("or 3,3,3	     # high priority")
+
+#ifdef __KERNEL__
+
+struct task_struct;
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
+void release_thread(struct task_struct *);
+
+#ifdef CONFIG_PPC32
+
+#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
+#error User TASK_SIZE overlaps with KERNEL_START address
+#endif
+#define TASK_SIZE	(CONFIG_TASK_SIZE)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(TASK_SIZE / 8 * 3)
+#endif
+
+#ifdef CONFIG_PPC64
+/*
+ * 64-bit user address space can have multiple limits
+ * For now supported values are:
+ */
+#define TASK_SIZE_64TB  (0x0000400000000000UL)
+#define TASK_SIZE_128TB (0x0000800000000000UL)
+#define TASK_SIZE_512TB (0x0002000000000000UL)
+#define TASK_SIZE_1PB   (0x0004000000000000UL)
+#define TASK_SIZE_2PB   (0x0008000000000000UL)
+/*
+ * With 52 bits in the address we can support
+ * up to 4PB of range.
+ */
+#define TASK_SIZE_4PB   (0x0010000000000000UL)
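+/*
+ * Sanity check (illustrative): these limits are exact powers of two,
+ * e.g. TASK_SIZE_128TB = 0x0000800000000000 = 2^47 bytes = 128 TiB and
+ * TASK_SIZE_4PB = 0x0010000000000000 = 2^52 bytes, which matches the
+ * 52 address bits mentioned above.
+ */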
+
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
+/*
+ * Max value currently used:
+ */
+#define TASK_SIZE_USER64		TASK_SIZE_4PB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
+#define TASK_CONTEXT_SIZE		TASK_SIZE_512TB
+#else
+#define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
+/*
+ * We don't need to allocate extended context ids for 4K page size, because
+ * we limit the max effective address on this config to 64TB.
+ */
+#define TASK_CONTEXT_SIZE		TASK_SIZE_64TB
+#endif
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so that referencing 0xFFFFFFFF generates EFAULT).
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+		TASK_SIZE_USER32 : TASK_SIZE_USER64)
+#define TASK_SIZE	  TASK_SIZE_OF(current)
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
+
+#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
+		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+#endif
+
+/*
+ * Initial task size value for user applications. For book3s 64 we start
+ * with 128TB and conditionally enable up to 512TB.
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ?			\
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
+#else
+#define DEFAULT_MAP_WINDOW	TASK_SIZE
+#endif
+
+#ifdef __powerpc64__
+
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
+#define STACK_TOP_USER32 TASK_SIZE_USER32
+
+#define STACK_TOP (is_32bit_task() ? \
+		   STACK_TOP_USER32 : STACK_TOP_USER64)
+
+#define STACK_TOP_MAX TASK_SIZE_USER64
+
+#else /* __powerpc64__ */
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+
+#endif /* __powerpc64__ */
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+	u64	fpscr;		/* Floating point status */
+};
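+/*
+ * Illustrative note (not part of the original source): with CONFIG_VSX
+ * each fpr[i] above holds a full 128-bit VSR as two u64 doublewords.
+ * TS_FPR(i) selects the doubleword that aliases the classic 64-bit FPR
+ * (index TS_FPROFFSET), while TS_VSRLOWOFFSET indexes the other half.
+ */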
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+	vector128	vr[32] __attribute__((aligned(16)));
+	vector128	vscr __attribute__((aligned(16)));
+};
+
+struct debug_reg {
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	/*
+	 * The following help to manage the use of the Debug Control
+	 * Registers on the BookE platforms.
+	 */
+	uint32_t	dbcr0;
+	uint32_t	dbcr1;
+#ifdef CONFIG_BOOKE
+	uint32_t	dbcr2;
+#endif
+	/*
+	 * The stored value of the DBSR register will be the value at the
+	 * last debug interrupt. This register is read-only from user
+	 * space (it is never written from there) and its value helps
+	 * describe the reason for the last debug trap.
+	 */
+	uint32_t	dbsr;
+	/*
+	 * The following will contain addresses used by debug applications
+	 * to help trace and trap on particular address locations.
+	 * The bits in the Debug Control Registers above help define which
+	 * of the following registers will contain valid data and/or addresses.
+	 */
+	unsigned long	iac1;
+	unsigned long	iac2;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	unsigned long	iac3;
+	unsigned long	iac4;
+#endif
+	unsigned long	dac1;
+	unsigned long	dac2;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+	unsigned long	dvc1;
+	unsigned long	dvc2;
+#endif
+#endif
+};
+
+struct thread_struct {
+	unsigned long	ksp;		/* Kernel stack pointer */
+
+#ifdef CONFIG_PPC64
+	unsigned long	ksp_vsid;
+#endif
+	struct pt_regs	*regs;		/* Pointer to saved register state */
+	mm_segment_t	addr_limit;	/* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+	/* BookE base exception scratch space; align on cacheline */
+	unsigned long	normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+#endif
+	/* Debug Registers */
+	struct debug_reg debug;
+	struct thread_fp_state	fp_state;
+	struct thread_fp_state	*fp_save_area;
+	int		fpexc_mode;	/* floating-point exception mode */
+	unsigned int	align_ctl;	/* alignment handling control */
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	struct perf_event *ptrace_bps[HBP_NUM];
+	/*
+	 * Helps identify source of single-step exception and subsequent
+	 * hw-breakpoint enablement
+	 */
+	struct perf_event *last_hit_ubp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
+	unsigned long	trap_nr;	/* last trap # on this thread */
+	u8 load_fp;
+#ifdef CONFIG_ALTIVEC
+	u8 load_vec;
+	struct thread_vr_state vr_state;
+	struct thread_vr_state *vr_save_area;
+	unsigned long	vrsave;
+	int		used_vr;	/* set if process has used altivec */
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	/* VSR status */
+	int		used_vsr;	/* set if process has used VSX */
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
+	u64		acc;		/* Accumulator */
+	unsigned long	spefscr;	/* SPE & eFP status */
+	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
+					   call or trap return */
+	int		used_spe;	/* set if process has used spe */
+#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u8	load_tm;
+	u64		tm_tfhar;	/* Transaction fail handler addr */
+	u64		tm_texasr;	/* Transaction exception & summary */
+	u64		tm_tfiar;	/* Transaction fail instr address reg */
+	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
+
+	unsigned long	tm_tar;
+	unsigned long	tm_ppr;
+	unsigned long	tm_dscr;
+
+	/*
+	 * Checkpointed FP and VSX 0-31 register set.
+	 *
+	 * When a transaction is active/signalled/scheduled etc., *regs is the
+	 * most recent (speculative) set of GPRs, with ckpt_regs being the
+	 * older checkpointed regs to which we roll back if the transaction
+	 * aborts.
+	 *
+	 * ckfp_state/ckvr_state below are analogous to how ckpt_regs and
+	 * pt_regs work.
+	 */
+	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
+	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
+	unsigned long	ckvrsave; /* Checkpointed VRSAVE */
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#ifdef CONFIG_PPC_MEM_KEYS
+	unsigned long	amr;
+	unsigned long	iamr;
+	unsigned long	uamor;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+	void*		kvm_shadow_vcpu; /* KVM internal data */
+#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+	struct kvm_vcpu	*kvm_vcpu;
+#endif
+#ifdef CONFIG_PPC64
+	unsigned long	dscr;
+	unsigned long	fscr;
+	/*
+	 * dscr_inherit indicates that the process has explicitly changed
+	 * the DSCR register value for itself, so the kernel won't use the
+	 * default CPU DSCR value contained in the PACA structure anymore
+	 * during context switch. Once this variable is set, the behaviour
+	 * is also inherited by all children of this process from that
+	 * point onwards.
+	 */
+	int		dscr_inherit;
+	unsigned long	ppr;	/* used to save/restore SMT priority */
+	unsigned long	tidr;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	unsigned long	tar;
+	unsigned long	ebbrr;
+	unsigned long	ebbhr;
+	unsigned long	bescr;
+	unsigned long	siar;
+	unsigned long	sdar;
+	unsigned long	sier;
+	unsigned long	mmcr2;
+	unsigned 	mmcr0;
+
+	unsigned 	used_ebb;
+	unsigned int	used_vas;
+#endif
+};
+
+#define ARCH_MIN_TASKALIGN 16
+
+#define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
+
+#ifdef CONFIG_SPE
+#define SPEFSCR_INIT \
+	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#else
+#define SPEFSCR_INIT
+#endif
+
+#ifdef CONFIG_PPC32
+#define INIT_THREAD { \
+	.ksp = INIT_SP, \
+	.ksp_limit = INIT_SP_LIMIT, \
+	.addr_limit = KERNEL_DS, \
+	.pgdir = swapper_pg_dir, \
+	.fpexc_mode = MSR_FE0 | MSR_FE1, \
+	SPEFSCR_INIT \
+}
+#else
+#define INIT_THREAD  { \
+	.ksp = INIT_SP, \
+	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
+	.addr_limit = KERNEL_DS, \
+	.fpexc_mode = 0, \
+	.ppr = INIT_PPR, \
+	.fscr = FSCR_TAR | FSCR_EBB \
+}
+#endif
+
+#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+
+/* Get/set floating-point exception mode */
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
+
+extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
+extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
+
+#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
+#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
+
+extern int get_endian(struct task_struct *tsk, unsigned long adr);
+extern int set_endian(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
+{
+	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
+}
+
+static inline unsigned long __pack_fe01(unsigned int fpmode)
+{
+	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
+}
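+/*
+ * Worked example (illustrative): with MSR_FE0 = 0x0800 and
+ * MSR_FE1 = 0x0100, an MSR word with both bits set unpacks to
+ * (0x0800 >> 10) | (0x0100 >> 8) = 2 | 1 = 3 (precise FP exception
+ * mode), and __pack_fe01(3) rebuilds 0x0800 | 0x0100, so the two
+ * helpers are exact inverses over the two-bit fpmode.
+ */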
+
+#ifdef CONFIG_PPC64
+#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)
+
+#define spin_begin()	HMT_low()
+
+#define spin_cpu_relax()	barrier()
+
+#define spin_cpu_yield()	spin_cpu_relax()
+
+#define spin_end()	HMT_medium()
+
+#define spin_until_cond(cond)					\
+do {								\
+	if (unlikely(!(cond))) {				\
+		spin_begin();					\
+		do {						\
+			spin_cpu_relax();			\
+		} while (!(cond));				\
+		spin_end();					\
+	}							\
+} while (0)
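+/*
+ * Hedged usage sketch (hypothetical caller): drop SMT priority only
+ * while actually polling, e.g.
+ *
+ *	spin_until_cond(READ_ONCE(flag));
+ *
+ * which wraps the loop in spin_begin()/spin_cpu_relax()/spin_end() so
+ * sibling hardware threads get more of the core while we wait.
+ */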
+
+#else
+#define cpu_relax()	barrier()
+#endif
+
+/* Check that a certain kernel stack pointer is valid in task_struct p */
+int validate_sp(unsigned long sp, struct task_struct *p,
+                       unsigned long nbytes);
+
+/*
+ * Prefetch macros.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+	if (unlikely(!x))
+		return;
+
+	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+	if (unlikely(!x))
+		return;
+
+	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+}
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+#ifdef CONFIG_PPC64
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
+{
+	if (is_32)
+		return sp & 0x0ffffffffUL;
+	return sp;
+}
+#else
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
+{
+	return sp;
+}
+#endif
+
+extern unsigned long cpuidle_disable;
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
+
+extern int powersave_nap;	/* set if nap mode can be used in idle loop */
+extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+extern void power7_idle_type(unsigned long type);
+extern unsigned long power9_idle_stop(unsigned long psscr_val);
+extern unsigned long power9_offline_stop(unsigned long psscr_val);
+extern void power9_idle_type(unsigned long stop_psscr_val,
+			      unsigned long stop_psscr_mask);
+
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to);
+extern void cvt_df(double *from, float *to);
+extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand 
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
new file mode 100644
index 0000000..b04c5ce
--- /dev/null
+++ b/arch/powerpc/include/asm/prom.h
@@ -0,0 +1,184 @@
+#ifndef _POWERPC_PROM_H
+#define _POWERPC_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+
+/* These includes should be removed once implicit includes are cleaned up. */
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#define OF_DT_BEGIN_NODE	0x1		/* Start of node, full name */
+#define OF_DT_END_NODE		0x2		/* End node */
+#define OF_DT_PROP		0x3		/* Property: name off, size,
+						 * content */
+#define OF_DT_NOP		0x4		/* nop */
+#define OF_DT_END		0x9
+
+#define OF_DT_VERSION		0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names; it is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel.
+ * Each property name appears only once in this page (cheap compression).
+ *
+ * The mem_rsvmap contains a map of reserved ranges of physical memory;
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0.
+ */
+struct boot_param_header {
+	__be32	magic;			/* magic word OF_DT_HEADER */
+	__be32	totalsize;		/* total size of DT block */
+	__be32	off_dt_struct;		/* offset to structure */
+	__be32	off_dt_strings;		/* offset to strings */
+	__be32	off_mem_rsvmap;		/* offset to memory reserve map */
+	__be32	version;		/* format version */
+	__be32	last_comp_version;	/* last compatible version */
+	/* version 2 fields below */
+	__be32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
+	/* version 3 fields below */
+	__be32	dt_strings_size;	/* size of the DT strings block */
+	/* version 17 fields below */
+	__be32	dt_struct_size;		/* size of the DT structure block */
+};
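+/*
+ * Minimal validation sketch (illustrative; assumes OF_DT_HEADER,
+ * 0xd00dfeed, from <linux/of_fdt.h>): all fields are big-endian, so a
+ * consumer converts before comparing:
+ *
+ *	static inline bool bph_looks_valid(const struct boot_param_header *bph)
+ *	{
+ *		return be32_to_cpu(bph->magic) == OF_DT_HEADER &&
+ *		       be32_to_cpu(bph->last_comp_version) <= OF_DT_VERSION;
+ *	}
+ */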
+
+/*
+ * OF address retrieval & translation
+ */
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+			 unsigned long *busno, unsigned long *phys,
+			 unsigned long *size);
+
+extern void of_instantiate_rtc(void);
+
+extern int of_get_ibm_chip_id(struct device_node *np);
+
+struct of_drc_info {
+	char *drc_type;
+	char *drc_name_prefix;
+	u32 drc_index_start;
+	u32 drc_name_suffix_start;
+	u32 num_sequential_elems;
+	u32 sequential_inc;
+	u32 drc_power_domain;
+	u32 last_drc_index;
+};
+
+extern int of_read_drc_info_cell(struct property **prop,
+			const __be32 **curval, struct of_drc_info *data);
+
+
+/*
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node.  For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections whose embedded
+ * structures carry the various capability information.
+ */
+
+/* New method - extensible architecture description vector. */
+
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE		0x80	/* ignore this vector */
+#define OV_CESSATION_POLICY	0x40	/* halt if unsupported option present */
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00		0x80	/* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01		0x40	/* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02		0x20	/* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03		0x10	/* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04		0x08	/* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05		0x04	/* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06		0x02	/* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07		0x01	/* set if we support PowerPC 2.07 */
+
+#define OV1_PPC_3_00		0x80	/* set if we support PowerPC 3.00 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE		0x20	/* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP			0x80	/* floating point */
+#define OV3_VMX			0x40	/* VMX/Altivec */
+#define OV3_DFP			0x20	/* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP		0x01	/* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported
+ * These bits are also used in firmware_has_feature() to validate
+ * the capabilities reported for vector 5 in the device tree so we
+ * encode the vector index in the define and use the OV5_FEAT()
+ * and OV5_INDX() macros to extract the desired information; a worked
+ * example follows the list of defines below.
+ */
+#define OV5_FEAT(x)	((x) & 0xff)
+#define OV5_INDX(x)	((x) >> 8)
+#define OV5_LPAR		0x0280	/* logical partitioning supported */
+#define OV5_SPLPAR		0x0240	/* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY	0x0220
+#define OV5_LARGE_PAGES		0x0210	/* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU	0x0202	/* donate dedicated CPU support */
+#define OV5_MSI			0x0201	/* PCIe/MSI support */
+#define OV5_CMO			0x0480	/* Cooperative Memory Overcommitment */
+#define OV5_XCMO		0x0440	/* Page Coalescing */
+#define OV5_TYPE1_AFFINITY	0x0580	/* Type 1 NUMA affinity */
+#define OV5_PRRN		0x0540	/* Platform Resource Reassignment */
+#define OV5_HP_EVT		0x0604	/* Hot Plug Event support */
+#define OV5_RESIZE_HPT		0x0601	/* Hash Page Table resizing */
+#define OV5_PFO_HW_RNG		0x1180	/* PFO Random Number Generator */
+#define OV5_PFO_HW_842		0x1140	/* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR		0x1120	/* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS	0x1501	/* 1,2,or 4 Sub-Processors supported */
+#define OV5_DRMEM_V2		0x1680	/* ibm,dynamic-reconfiguration-v2 */
+#define OV5_XIVE_SUPPORT	0x17C0	/* XIVE Exploitation Support Mask */
+#define OV5_XIVE_LEGACY		0x1700	/* XIVE legacy mode Only */
+#define OV5_XIVE_EXPLOIT	0x1740	/* XIVE exploitation mode Only */
+#define OV5_XIVE_EITHER		0x1780	/* XIVE legacy or exploitation mode */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT		0x18C0	/* MMU Mode Support Mask */
+#define OV5_MMU_HASH		0x1800	/* Hash MMU Only */
+#define OV5_MMU_RADIX		0x1840	/* Radix MMU Only */
+#define OV5_MMU_EITHER		0x1880	/* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC		0x18C0	/* Hash or Radix Can Switch Later */
+#define OV5_NMMU		0x1820	/* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL	0x1980	/* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE		0x1940	/* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE		0x1A40	/* Guest Translation Shoot Down Avail */
+#define OV5_DRC_INFO		0x1640	/* Redef Prop Structures: drc-info   */
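+
+/*
+ * Worked example (illustrative): OV5_MMU_RADIX is 0x1840, so
+ * OV5_INDX(OV5_MMU_RADIX) = 0x18 (byte 24 of option vector 5) and
+ * OV5_FEAT(OV5_MMU_RADIX) = 0x40 (the bit to test within that byte).
+ */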
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX		0x02	/* Linux is our OS */
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
new file mode 100644
index 0000000..17ee719
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3.h
@@ -0,0 +1,531 @@
+/*
+ *  PS3 platform declarations.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_PS3_H)
+#define _ASM_POWERPC_PS3_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/cell-pmu.h>
+
+union ps3_firmware_version {
+	u64 raw;
+	struct {
+		u16 pad;
+		u16 major;
+		u16 minor;
+		u16 rev;
+	};
+};
+
+void ps3_get_firmware_version(union ps3_firmware_version *v);
+int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev);
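+
+/*
+ * Hedged usage sketch: drivers can gate behaviour on the firmware
+ * level; enable_new_feature() here is a hypothetical helper, not a
+ * real API:
+ *
+ *	if (ps3_compare_firmware_version(1, 8, 0) >= 0)
+ *		enable_new_feature();
+ */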
+
+/* 'Other OS' area */
+
+enum ps3_param_av_multi_out {
+	PS3_PARAM_AV_MULTI_OUT_NTSC = 0,
+	PS3_PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+	PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+	PS3_PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
+
+extern u64 ps3_os_area_get_rtc_diff(void);
+extern void ps3_os_area_set_rtc_diff(u64 rtc_diff);
+
+struct ps3_os_area_flash_ops {
+	ssize_t (*read)(void *buf, size_t count, loff_t pos);
+	ssize_t (*write)(const void *buf, size_t count, loff_t pos);
+};
+
+extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops);
+
+/* dma routines */
+
+enum ps3_dma_page_size {
+	PS3_DMA_4K = 12U,
+	PS3_DMA_64K = 16U,
+	PS3_DMA_1M = 20U,
+	PS3_DMA_16M = 24U,
+};
+
+enum ps3_dma_region_type {
+	PS3_DMA_OTHER = 0,
+	PS3_DMA_INTERNAL = 2,
+};
+
+struct ps3_dma_region_ops;
+
+/**
+ * struct ps3_dma_region - Per-device DMA state variables structure
+ * @dev: The system bus device which owns this region.
+ * @page_size: The IOC page size.
+ * @region_type: The HV region type.
+ * @bus_addr: The 'translated' bus address of the region.
+ * @len: The length in bytes of the region.
+ * @offset: The offset from the start of memory of the region.
+ * @ioid: The IOID of the device that owns this region.
+ * @chunk_list: Opaque variable used by the ioc page manager.
+ * @region_ops: struct ps3_dma_region_ops - dma region operations
+ */
+
+struct ps3_dma_region {
+	struct ps3_system_bus_device *dev;
+	/* device variables */
+	const struct ps3_dma_region_ops *region_ops;
+	unsigned char ioid;
+	enum ps3_dma_page_size page_size;
+	enum ps3_dma_region_type region_type;
+	unsigned long len;
+	unsigned long offset;
+
+	/* driver variables  (set by ps3_dma_region_create) */
+	unsigned long bus_addr;
+	struct {
+		spinlock_t lock;
+		struct list_head head;
+	} chunk_list;
+};
+
+struct ps3_dma_region_ops {
+	int (*create)(struct ps3_dma_region *);
+	int (*free)(struct ps3_dma_region *);
+	int (*map)(struct ps3_dma_region *,
+		   unsigned long virt_addr,
+		   unsigned long len,
+		   dma_addr_t *bus_addr,
+		   u64 iopte_pp);
+	int (*unmap)(struct ps3_dma_region *,
+		     dma_addr_t bus_addr,
+		     unsigned long len);
+};
+/**
+ * ps3_dma_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+struct ps3_system_bus_device;
+
+int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
+	enum ps3_dma_region_type region_type, void *addr, unsigned long len);
+int ps3_dma_region_create(struct ps3_dma_region *r);
+int ps3_dma_region_free(struct ps3_dma_region *r);
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+	unsigned long len, dma_addr_t *bus_addr,
+	u64 iopte_pp);
+int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
+	unsigned long len);
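+
+/*
+ * Hedged usage sketch (hypothetical driver, values assumed): DMA setup
+ * is a three step dance -- initialize the bookkeeping, create the
+ * region with the HV, then map buffers into it:
+ *
+ *	ps3_dma_region_init(dev, dev->d_region, PS3_DMA_64K,
+ *			    PS3_DMA_INTERNAL, NULL, 0);
+ *	ps3_dma_region_create(dev->d_region);
+ *	ps3_dma_map(dev->d_region, virt_addr, len, &bus_addr, iopte_pp);
+ */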
+
+/* mmio routines */
+
+enum ps3_mmio_page_size {
+	PS3_MMIO_4K = 12U,
+	PS3_MMIO_64K = 16U
+};
+
+struct ps3_mmio_region_ops;
+/**
+ * struct ps3_mmio_region - a per device mmio state variables structure
+ *
+ * Current systems can be supported with a single region per device.
+ */
+
+struct ps3_mmio_region {
+	struct ps3_system_bus_device *dev;
+	const struct ps3_mmio_region_ops *mmio_ops;
+	unsigned long bus_addr;
+	unsigned long len;
+	enum ps3_mmio_page_size page_size;
+	unsigned long lpar_addr;
+};
+
+struct ps3_mmio_region_ops {
+	int (*create)(struct ps3_mmio_region *);
+	int (*free)(struct ps3_mmio_region *);
+};
+/**
+ * ps3_mmio_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
+	enum ps3_mmio_page_size page_size);
+int ps3_mmio_region_create(struct ps3_mmio_region *r);
+int ps3_free_mmio_region(struct ps3_mmio_region *r);
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
+
+/* interrupt routines */
+
+enum ps3_cpu_binding {
+	PS3_BINDING_CPU_ANY = -1,
+	PS3_BINDING_CPU_0 = 0,
+	PS3_BINDING_CPU_1 = 1,
+};
+
+int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
+	unsigned int *virq);
+int ps3_irq_plug_destroy(unsigned int virq);
+int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_event_receive_port_destroy(unsigned int virq);
+int ps3_send_event_locally(unsigned int virq);
+
+int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
+	unsigned int *virq);
+int ps3_io_irq_destroy(unsigned int virq);
+int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
+	unsigned int *virq);
+int ps3_vuart_irq_destroy(unsigned int virq);
+int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
+	unsigned int class, unsigned int *virq);
+int ps3_spe_irq_destroy(unsigned int virq);
+
+int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
+	enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
+	unsigned int virq);
+
+/* lv1 result codes */
+
+enum lv1_result {
+	LV1_SUCCESS                     = 0,
+	/* not used                       -1 */
+	LV1_RESOURCE_SHORTAGE           = -2,
+	LV1_NO_PRIVILEGE                = -3,
+	LV1_DENIED_BY_POLICY            = -4,
+	LV1_ACCESS_VIOLATION            = -5,
+	LV1_NO_ENTRY                    = -6,
+	LV1_DUPLICATE_ENTRY             = -7,
+	LV1_TYPE_MISMATCH               = -8,
+	LV1_BUSY                        = -9,
+	LV1_EMPTY                       = -10,
+	LV1_WRONG_STATE                 = -11,
+	/* not used                       -12 */
+	LV1_NO_MATCH                    = -13,
+	LV1_ALREADY_CONNECTED           = -14,
+	LV1_UNSUPPORTED_PARAMETER_VALUE = -15,
+	LV1_CONDITION_NOT_SATISFIED     = -16,
+	LV1_ILLEGAL_PARAMETER_VALUE     = -17,
+	LV1_BAD_OPTION                  = -18,
+	LV1_IMPLEMENTATION_LIMITATION   = -19,
+	LV1_NOT_IMPLEMENTED             = -20,
+	LV1_INVALID_CLASS_ID            = -21,
+	LV1_CONSTRAINT_NOT_SATISFIED    = -22,
+	LV1_ALIGNMENT_ERROR             = -23,
+	LV1_HARDWARE_ERROR              = -24,
+	LV1_INVALID_DATA_FORMAT         = -25,
+	LV1_INVALID_OPERATION           = -26,
+	LV1_INTERNAL_ERROR              = -32768,
+};
+
+static inline const char* ps3_result(int result)
+{
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+	switch (result) {
+	case LV1_SUCCESS:
+		return "LV1_SUCCESS (0)";
+	case -1:
+		return "** unknown result ** (-1)";
+	case LV1_RESOURCE_SHORTAGE:
+		return "LV1_RESOURCE_SHORTAGE (-2)";
+	case LV1_NO_PRIVILEGE:
+		return "LV1_NO_PRIVILEGE (-3)";
+	case LV1_DENIED_BY_POLICY:
+		return "LV1_DENIED_BY_POLICY (-4)";
+	case LV1_ACCESS_VIOLATION:
+		return "LV1_ACCESS_VIOLATION (-5)";
+	case LV1_NO_ENTRY:
+		return "LV1_NO_ENTRY (-6)";
+	case LV1_DUPLICATE_ENTRY:
+		return "LV1_DUPLICATE_ENTRY (-7)";
+	case LV1_TYPE_MISMATCH:
+		return "LV1_TYPE_MISMATCH (-8)";
+	case LV1_BUSY:
+		return "LV1_BUSY (-9)";
+	case LV1_EMPTY:
+		return "LV1_EMPTY (-10)";
+	case LV1_WRONG_STATE:
+		return "LV1_WRONG_STATE (-11)";
+	case -12:
+		return "** unknown result ** (-12)";
+	case LV1_NO_MATCH:
+		return "LV1_NO_MATCH (-13)";
+	case LV1_ALREADY_CONNECTED:
+		return "LV1_ALREADY_CONNECTED (-14)";
+	case LV1_UNSUPPORTED_PARAMETER_VALUE:
+		return "LV1_UNSUPPORTED_PARAMETER_VALUE (-15)";
+	case LV1_CONDITION_NOT_SATISFIED:
+		return "LV1_CONDITION_NOT_SATISFIED (-16)";
+	case LV1_ILLEGAL_PARAMETER_VALUE:
+		return "LV1_ILLEGAL_PARAMETER_VALUE (-17)";
+	case LV1_BAD_OPTION:
+		return "LV1_BAD_OPTION (-18)";
+	case LV1_IMPLEMENTATION_LIMITATION:
+		return "LV1_IMPLEMENTATION_LIMITATION (-19)";
+	case LV1_NOT_IMPLEMENTED:
+		return "LV1_NOT_IMPLEMENTED (-20)";
+	case LV1_INVALID_CLASS_ID:
+		return "LV1_INVALID_CLASS_ID (-21)";
+	case LV1_CONSTRAINT_NOT_SATISFIED:
+		return "LV1_CONSTRAINT_NOT_SATISFIED (-22)";
+	case LV1_ALIGNMENT_ERROR:
+		return "LV1_ALIGNMENT_ERROR (-23)";
+	case LV1_HARDWARE_ERROR:
+		return "LV1_HARDWARE_ERROR (-24)";
+	case LV1_INVALID_DATA_FORMAT:
+		return "LV1_INVALID_DATA_FORMAT (-25)";
+	case LV1_INVALID_OPERATION:
+		return "LV1_INVALID_OPERATION (-26)";
+	case LV1_INTERNAL_ERROR:
+		return "LV1_INTERNAL_ERROR (-32768)";
+	default:
+		BUG();
+		return "** unknown result **";
+	}
+#else
+	return "";
+#endif
+}
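+
+/*
+ * Hedged usage sketch (hypothetical call site): lv1 hypercall wrappers
+ * typically log failures through ps3_result(), e.g.
+ *
+ *	result = lv1_construct_event_receive_port(&outlet);
+ *	if (result)
+ *		pr_debug("%s: failed: %s\n", __func__, ps3_result(result));
+ */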
+
+/* system bus routines */
+
+enum ps3_match_id {
+	PS3_MATCH_ID_EHCI		= 1,
+	PS3_MATCH_ID_OHCI		= 2,
+	PS3_MATCH_ID_GELIC		= 3,
+	PS3_MATCH_ID_AV_SETTINGS	= 4,
+	PS3_MATCH_ID_SYSTEM_MANAGER	= 5,
+	PS3_MATCH_ID_STOR_DISK		= 6,
+	PS3_MATCH_ID_STOR_ROM		= 7,
+	PS3_MATCH_ID_STOR_FLASH		= 8,
+	PS3_MATCH_ID_SOUND		= 9,
+	PS3_MATCH_ID_GPU		= 10,
+	PS3_MATCH_ID_LPM		= 11,
+};
+
+enum ps3_match_sub_id {
+	PS3_MATCH_SUB_ID_GPU_FB		= 1,
+	PS3_MATCH_SUB_ID_GPU_RAMDISK	= 2,
+};
+
+#define PS3_MODULE_ALIAS_EHCI		"ps3:1:0"
+#define PS3_MODULE_ALIAS_OHCI		"ps3:2:0"
+#define PS3_MODULE_ALIAS_GELIC		"ps3:3:0"
+#define PS3_MODULE_ALIAS_AV_SETTINGS	"ps3:4:0"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER	"ps3:5:0"
+#define PS3_MODULE_ALIAS_STOR_DISK	"ps3:6:0"
+#define PS3_MODULE_ALIAS_STOR_ROM	"ps3:7:0"
+#define PS3_MODULE_ALIAS_STOR_FLASH	"ps3:8:0"
+#define PS3_MODULE_ALIAS_SOUND		"ps3:9:0"
+#define PS3_MODULE_ALIAS_GPU_FB		"ps3:10:1"
+#define PS3_MODULE_ALIAS_GPU_RAMDISK	"ps3:10:2"
+#define PS3_MODULE_ALIAS_LPM		"ps3:11:0"
+
+enum ps3_system_bus_device_type {
+	PS3_DEVICE_TYPE_IOC0 = 1,
+	PS3_DEVICE_TYPE_SB,
+	PS3_DEVICE_TYPE_VUART,
+	PS3_DEVICE_TYPE_LPM,
+};
+
+/**
+ * struct ps3_system_bus_device - a device on the system bus
+ */
+
+struct ps3_system_bus_device {
+	enum ps3_match_id match_id;
+	enum ps3_match_sub_id match_sub_id;
+	enum ps3_system_bus_device_type dev_type;
+
+	u64 bus_id;                       /* SB */
+	u64 dev_id;                       /* SB */
+	unsigned int interrupt_id;        /* SB */
+	struct ps3_dma_region *d_region;  /* SB, IOC0 */
+	struct ps3_mmio_region *m_region; /* SB, IOC0*/
+	unsigned int port_number;         /* VUART */
+	struct {                          /* LPM */
+		u64 node_id;
+		u64 pu_id;
+		u64 rights;
+	} lpm;
+
+/*	struct iommu_table *iommu_table; -- waiting for BenH's cleanups */
+	struct device core;
+	void *driver_priv; /* private driver variables */
+};
+
+int ps3_open_hv_device(struct ps3_system_bus_device *dev);
+int ps3_close_hv_device(struct ps3_system_bus_device *dev);
+
+/**
+ * struct ps3_system_bus_driver - a driver for a device on the system bus
+ */
+
+struct ps3_system_bus_driver {
+	enum ps3_match_id match_id;
+	enum ps3_match_sub_id match_sub_id;
+	struct device_driver core;
+	int (*probe)(struct ps3_system_bus_device *);
+	int (*remove)(struct ps3_system_bus_device *);
+	int (*shutdown)(struct ps3_system_bus_device *);
+/*	int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+/*	int (*resume)(struct ps3_system_bus_device *); */
+};
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
+
+static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv(
+	struct device_driver *_drv)
+{
+	return container_of(_drv, struct ps3_system_bus_driver, core);
+}
+static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev(
+	struct device *_dev)
+{
+	return container_of(_dev, struct ps3_system_bus_device, core);
+}
+static inline struct ps3_system_bus_driver *
+	ps3_system_bus_dev_to_system_bus_drv(struct ps3_system_bus_device *_dev)
+{
+	BUG_ON(!_dev);
+	BUG_ON(!_dev->core.driver);
+	return ps3_drv_to_system_bus_drv(_dev->core.driver);
+}
+
+/**
+ * ps3_system_bus_set_drvdata -
+ * @dev: device structure
+ * @data: Data to set
+ */
+
+static inline void ps3_system_bus_set_drvdata(
+	struct ps3_system_bus_device *dev, void *data)
+{
+	dev_set_drvdata(&dev->core, data);
+}
+static inline void *ps3_system_bus_get_drvdata(
+	struct ps3_system_bus_device *dev)
+{
+	return dev_get_drvdata(&dev->core);
+}
+
+/* These two need global scope for get_arch_dma_ops(). */
+
+extern struct bus_type ps3_system_bus_type;
+
+/* system manager */
+
+struct ps3_sys_manager_ops {
+	struct ps3_system_bus_device *dev;
+	void (*power_off)(struct ps3_system_bus_device *dev);
+	void (*restart)(struct ps3_system_bus_device *dev);
+};
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops);
+void __noreturn ps3_sys_manager_power_off(void);
+void __noreturn ps3_sys_manager_restart(void);
+void __noreturn ps3_sys_manager_halt(void);
+int ps3_sys_manager_get_wol(void);
+void ps3_sys_manager_set_wol(int state);
+
+struct ps3_prealloc {
+    const char *name;
+    void *address;
+    unsigned long size;
+    unsigned long align;
+};
+
+extern struct ps3_prealloc ps3fb_videomemory;
+extern struct ps3_prealloc ps3flash_bounce_buffer;
+
+/* logical performance monitor */
+
+/**
+ * enum ps3_lpm_rights - Rights granted by the system policy module.
+ *
+ * @PS3_LPM_RIGHTS_USE_LPM: The right to use the lpm.
+ * @PS3_LPM_RIGHTS_USE_TB: The right to use the internal trace buffer.
+ */
+
+enum ps3_lpm_rights {
+	PS3_LPM_RIGHTS_USE_LPM = 0x001,
+	PS3_LPM_RIGHTS_USE_TB = 0x100,
+};
+
+/**
+ * enum ps3_lpm_tb_type - Type of trace buffer lv1 should use.
+ *
+ * @PS3_LPM_TB_TYPE_NONE: Do not use a trace buffer.
+ * @PS3_LPM_TB_TYPE_INTERNAL: Use the lv1 internal trace buffer.  Must have
+ *  rights @PS3_LPM_RIGHTS_USE_TB.
+ */
+
+enum ps3_lpm_tb_type {
+	PS3_LPM_TB_TYPE_NONE = 0,
+	PS3_LPM_TB_TYPE_INTERNAL = 1,
+};
+
+int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
+	u64 tb_cache_size);
+int ps3_lpm_close(void);
+int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
+	unsigned long *bytes_copied);
+int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
+	unsigned long count, unsigned long *bytes_copied);
+void ps3_set_bookmark(u64 bookmark);
+void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id);
+int ps3_set_signal(u64 rtas_signal_group, u8 signal_bit, u16 sub_unit,
+	u8 bus_word);
+
+u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
+void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+u32 ps3_read_ctr(u32 cpu, u32 ctr);
+void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
+void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
+void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
+void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+void ps3_enable_pm(u32 cpu);
+void ps3_disable_pm(u32 cpu);
+void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+void ps3_disable_pm_interrupts(u32 cpu);
+
+u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
+void ps3_sync_irq(int node);
+u32 ps3_get_hw_thread_id(int cpu);
+u64 ps3_get_spe_id(void *arg);
+
+void ps3_early_mm_init(void);
+
+#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
new file mode 100644
index 0000000..a1dc784
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -0,0 +1,743 @@
+/*
+ *  PS3 AV backend support.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _ASM_POWERPC_PS3AV_H_
+#define _ASM_POWERPC_PS3AV_H_
+
+/** command for ioctl() **/
+#define PS3AV_VERSION 0x205	/* version of ps3av command */
+
+#define PS3AV_CID_AV_INIT              0x00000001
+#define PS3AV_CID_AV_FIN               0x00000002
+#define PS3AV_CID_AV_GET_HW_CONF       0x00000003
+#define PS3AV_CID_AV_GET_MONITOR_INFO  0x00000004
+#define PS3AV_CID_AV_ENABLE_EVENT      0x00000006
+#define PS3AV_CID_AV_DISABLE_EVENT     0x00000007
+#define PS3AV_CID_AV_TV_MUTE           0x0000000a
+
+#define PS3AV_CID_AV_VIDEO_CS          0x00010001
+#define PS3AV_CID_AV_VIDEO_MUTE        0x00010002
+#define PS3AV_CID_AV_VIDEO_DISABLE_SIG 0x00010003
+#define PS3AV_CID_AV_AUDIO_PARAM       0x00020001
+#define PS3AV_CID_AV_AUDIO_MUTE        0x00020002
+#define PS3AV_CID_AV_HDMI_MODE         0x00040001
+
+#define PS3AV_CID_VIDEO_INIT           0x01000001
+#define PS3AV_CID_VIDEO_MODE           0x01000002
+#define PS3AV_CID_VIDEO_FORMAT         0x01000004
+#define PS3AV_CID_VIDEO_PITCH          0x01000005
+
+#define PS3AV_CID_AUDIO_INIT           0x02000001
+#define PS3AV_CID_AUDIO_MODE           0x02000002
+#define PS3AV_CID_AUDIO_MUTE           0x02000003
+#define PS3AV_CID_AUDIO_ACTIVE         0x02000004
+#define PS3AV_CID_AUDIO_INACTIVE       0x02000005
+#define PS3AV_CID_AUDIO_SPDIF_BIT      0x02000006
+#define PS3AV_CID_AUDIO_CTRL           0x02000007
+
+#define PS3AV_CID_EVENT_UNPLUGGED      0x10000001
+#define PS3AV_CID_EVENT_PLUGGED        0x10000002
+#define PS3AV_CID_EVENT_HDCP_DONE      0x10000003
+#define PS3AV_CID_EVENT_HDCP_FAIL      0x10000004
+#define PS3AV_CID_EVENT_HDCP_AUTH      0x10000005
+#define PS3AV_CID_EVENT_HDCP_ERROR     0x10000006
+
+#define PS3AV_CID_AVB_PARAM            0x04000001
+
+/* max backend ports */
+#define PS3AV_HDMI_MAX                 2	/* HDMI_0 HDMI_1 */
+#define PS3AV_AVMULTI_MAX              1	/* AVMULTI_0 */
+#define PS3AV_AV_PORT_MAX              (PS3AV_HDMI_MAX + PS3AV_AVMULTI_MAX)
+#define PS3AV_OPT_PORT_MAX             1	/* SPDIF0 */
+#define PS3AV_HEAD_MAX                 2	/* HEAD_A HEAD_B */
+
+/* num of pkt for PS3AV_CID_AVB_PARAM */
+#define PS3AV_AVB_NUM_VIDEO            PS3AV_HEAD_MAX
+#define PS3AV_AVB_NUM_AUDIO            0	/* not supported */
+#define PS3AV_AVB_NUM_AV_VIDEO         PS3AV_AV_PORT_MAX
+#define PS3AV_AVB_NUM_AV_AUDIO         PS3AV_HDMI_MAX
+
+#define PS3AV_MUTE_PORT_MAX            1	/* num of ports in mute pkt */
+
+/* event_bit */
+#define PS3AV_CMD_EVENT_BIT_UNPLUGGED			(1 << 0)
+#define PS3AV_CMD_EVENT_BIT_PLUGGED			(1 << 1)
+#define PS3AV_CMD_EVENT_BIT_HDCP_DONE			(1 << 2)
+#define PS3AV_CMD_EVENT_BIT_HDCP_FAIL			(1 << 3)
+#define PS3AV_CMD_EVENT_BIT_HDCP_REAUTH			(1 << 4)
+#define PS3AV_CMD_EVENT_BIT_HDCP_TOPOLOGY		(1 << 5)
+
+/* common params */
+/* mute */
+#define PS3AV_CMD_MUTE_OFF				0x0000
+#define PS3AV_CMD_MUTE_ON				0x0001
+/* avport */
+#define PS3AV_CMD_AVPORT_HDMI_0				0x0000
+#define PS3AV_CMD_AVPORT_HDMI_1				0x0001
+#define PS3AV_CMD_AVPORT_AVMULTI_0			0x0010
+#define PS3AV_CMD_AVPORT_SPDIF_0			0x0020
+#define PS3AV_CMD_AVPORT_SPDIF_1			0x0021
+
+/* for av backend */
+/* av_mclk */
+#define PS3AV_CMD_AV_MCLK_128				0x0000
+#define PS3AV_CMD_AV_MCLK_256				0x0001
+#define PS3AV_CMD_AV_MCLK_512				0x0003
+/* av_inputlen */
+#define PS3AV_CMD_AV_INPUTLEN_16			0x02
+#define PS3AV_CMD_AV_INPUTLEN_20			0x0a
+#define PS3AV_CMD_AV_INPUTLEN_24			0x0b
+/* av_layout */
+#define PS3AV_CMD_AV_LAYOUT_32				(1 << 0)
+#define PS3AV_CMD_AV_LAYOUT_44				(1 << 1)
+#define PS3AV_CMD_AV_LAYOUT_48				(1 << 2)
+#define PS3AV_CMD_AV_LAYOUT_88				(1 << 3)
+#define PS3AV_CMD_AV_LAYOUT_96				(1 << 4)
+#define PS3AV_CMD_AV_LAYOUT_176				(1 << 5)
+#define PS3AV_CMD_AV_LAYOUT_192				(1 << 6)
+/* hdmi_mode */
+#define PS3AV_CMD_AV_HDMI_MODE_NORMAL			0xff
+#define PS3AV_CMD_AV_HDMI_HDCP_OFF			0x01
+#define PS3AV_CMD_AV_HDMI_EDID_PASS			0x80
+#define PS3AV_CMD_AV_HDMI_DVI				0x40
+
+/* for video module */
+/* video_head */
+#define PS3AV_CMD_VIDEO_HEAD_A				0x0000
+#define PS3AV_CMD_VIDEO_HEAD_B				0x0001
+/* video_cs_out video_cs_in */
+#define PS3AV_CMD_VIDEO_CS_NONE				0x0000
+#define PS3AV_CMD_VIDEO_CS_RGB_8			0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV444_8			0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV422_8			0x0003
+#define PS3AV_CMD_VIDEO_CS_XVYCC_8			0x0004
+#define PS3AV_CMD_VIDEO_CS_RGB_10			0x0005
+#define PS3AV_CMD_VIDEO_CS_YUV444_10			0x0006
+#define PS3AV_CMD_VIDEO_CS_YUV422_10			0x0007
+#define PS3AV_CMD_VIDEO_CS_XVYCC_10			0x0008
+#define PS3AV_CMD_VIDEO_CS_RGB_12			0x0009
+#define PS3AV_CMD_VIDEO_CS_YUV444_12			0x000a
+#define PS3AV_CMD_VIDEO_CS_YUV422_12			0x000b
+#define PS3AV_CMD_VIDEO_CS_XVYCC_12			0x000c
+/* video_vid */
+#define PS3AV_CMD_VIDEO_VID_NONE			0x0000
+#define PS3AV_CMD_VIDEO_VID_480I			0x0001
+#define PS3AV_CMD_VIDEO_VID_576I			0x0003
+#define PS3AV_CMD_VIDEO_VID_480P			0x0005
+#define PS3AV_CMD_VIDEO_VID_576P			0x0006
+#define PS3AV_CMD_VIDEO_VID_1080I_60HZ			0x0007
+#define PS3AV_CMD_VIDEO_VID_1080I_50HZ			0x0008
+#define PS3AV_CMD_VIDEO_VID_720P_60HZ			0x0009
+#define PS3AV_CMD_VIDEO_VID_720P_50HZ			0x000a
+#define PS3AV_CMD_VIDEO_VID_1080P_60HZ			0x000b
+#define PS3AV_CMD_VIDEO_VID_1080P_50HZ			0x000c
+#define PS3AV_CMD_VIDEO_VID_WXGA			0x000d
+#define PS3AV_CMD_VIDEO_VID_SXGA			0x000e
+#define PS3AV_CMD_VIDEO_VID_WUXGA			0x000f
+#define PS3AV_CMD_VIDEO_VID_480I_A			0x0010
+/* video_format */
+#define PS3AV_CMD_VIDEO_FORMAT_BLACK			0x0000
+#define PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT		0x0007
+/* video_order */
+#define PS3AV_CMD_VIDEO_ORDER_RGB			0x0000
+#define PS3AV_CMD_VIDEO_ORDER_BGR			0x0001
+/* video_fmt */
+#define PS3AV_CMD_VIDEO_FMT_X8R8G8B8			0x0000
+/* video_out_format */
+#define PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT		0x0000
+/* video_cl_cnv */
+#define PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT		0x0000
+#define PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT		0x0010
+/* video_sync */
+#define PS3AV_CMD_VIDEO_SYNC_VSYNC			0x0001
+#define PS3AV_CMD_VIDEO_SYNC_CSYNC			0x0004
+#define PS3AV_CMD_VIDEO_SYNC_HSYNC			0x0010
+
+/* for audio module */
+/* num_of_ch */
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_2			0x0000
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_3			0x0001
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_4			0x0002
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_5			0x0003
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_6			0x0004
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_7			0x0005
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_8			0x0006
+/* audio_fs */
+#define PS3AV_CMD_AUDIO_FS_32K				0x0001
+#define PS3AV_CMD_AUDIO_FS_44K				0x0002
+#define PS3AV_CMD_AUDIO_FS_48K				0x0003
+#define PS3AV_CMD_AUDIO_FS_88K				0x0004
+#define PS3AV_CMD_AUDIO_FS_96K				0x0005
+#define PS3AV_CMD_AUDIO_FS_176K				0x0006
+#define PS3AV_CMD_AUDIO_FS_192K				0x0007
+/* audio_word_bits */
+#define PS3AV_CMD_AUDIO_WORD_BITS_16			0x0001
+#define PS3AV_CMD_AUDIO_WORD_BITS_20			0x0002
+#define PS3AV_CMD_AUDIO_WORD_BITS_24			0x0003
+/* audio_format */
+#define PS3AV_CMD_AUDIO_FORMAT_PCM			0x0001
+#define PS3AV_CMD_AUDIO_FORMAT_BITSTREAM		0x00ff
+/* audio_source */
+#define PS3AV_CMD_AUDIO_SOURCE_SERIAL			0x0000
+#define PS3AV_CMD_AUDIO_SOURCE_SPDIF			0x0001
+/* audio_swap */
+#define PS3AV_CMD_AUDIO_SWAP_0				0x0000
+#define PS3AV_CMD_AUDIO_SWAP_1				0x0000
+/* audio_map */
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_0			0x0000
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_1			0x0001
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_2			0x0002
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_3			0x0003
+/* audio_layout */
+#define PS3AV_CMD_AUDIO_LAYOUT_2CH			0x0000
+#define PS3AV_CMD_AUDIO_LAYOUT_6CH			0x000b	/* LREClr */
+#define PS3AV_CMD_AUDIO_LAYOUT_8CH			0x001f	/* LREClrXY */
+/* audio_downmix */
+#define PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED		0x0000
+#define PS3AV_CMD_AUDIO_DOWNMIX_PROHIBITED		0x0001
+
+/* audio_port */
+#define PS3AV_CMD_AUDIO_PORT_HDMI_0			( 1 << 0 )
+#define PS3AV_CMD_AUDIO_PORT_HDMI_1			( 1 << 1 )
+#define PS3AV_CMD_AUDIO_PORT_AVMULTI_0			( 1 << 10 )
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_0			( 1 << 20 )
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_1			( 1 << 21 )
+
+/* audio_ctrl_id */
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_RESET		0x0000
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_DE_EMPHASIS		0x0001
+#define PS3AV_CMD_AUDIO_CTRL_ID_AVCLK			0x0002
+/* audio_ctrl_data[0] reset */
+#define PS3AV_CMD_AUDIO_CTRL_RESET_NEGATE		0x0000
+#define PS3AV_CMD_AUDIO_CTRL_RESET_ASSERT		0x0001
+/* audio_ctrl_data[0] de-emphasis */
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_OFF		0x0000
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_ON		0x0001
+/* audio_ctrl_data[0] avclk */
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_22			0x0000
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_18			0x0001
+
+/* av_vid */
+/* do not use these params directly, use vid_video2av */
+#define PS3AV_CMD_AV_VID_480I				0x0000
+#define PS3AV_CMD_AV_VID_480P				0x0001
+#define PS3AV_CMD_AV_VID_720P_60HZ			0x0002
+#define PS3AV_CMD_AV_VID_1080I_60HZ			0x0003
+#define PS3AV_CMD_AV_VID_1080P_60HZ			0x0004
+#define PS3AV_CMD_AV_VID_576I				0x0005
+#define PS3AV_CMD_AV_VID_576P				0x0006
+#define PS3AV_CMD_AV_VID_720P_50HZ			0x0007
+#define PS3AV_CMD_AV_VID_1080I_50HZ			0x0008
+#define PS3AV_CMD_AV_VID_1080P_50HZ			0x0009
+#define PS3AV_CMD_AV_VID_WXGA				0x000a
+#define PS3AV_CMD_AV_VID_SXGA				0x000b
+#define PS3AV_CMD_AV_VID_WUXGA				0x000c
+/* av_cs_out av_cs_in */
+/* use cs_video2av() */
+#define PS3AV_CMD_AV_CS_RGB_8				0x0000
+#define PS3AV_CMD_AV_CS_YUV444_8			0x0001
+#define PS3AV_CMD_AV_CS_YUV422_8			0x0002
+#define PS3AV_CMD_AV_CS_XVYCC_8				0x0003
+#define PS3AV_CMD_AV_CS_RGB_10				0x0004
+#define PS3AV_CMD_AV_CS_YUV444_10			0x0005
+#define PS3AV_CMD_AV_CS_YUV422_10			0x0006
+#define PS3AV_CMD_AV_CS_XVYCC_10			0x0007
+#define PS3AV_CMD_AV_CS_RGB_12				0x0008
+#define PS3AV_CMD_AV_CS_YUV444_12			0x0009
+#define PS3AV_CMD_AV_CS_YUV422_12			0x000a
+#define PS3AV_CMD_AV_CS_XVYCC_12			0x000b
+#define PS3AV_CMD_AV_CS_8				0x0000
+#define PS3AV_CMD_AV_CS_10				0x0001
+#define PS3AV_CMD_AV_CS_12				0x0002
+/* dither */
+#define PS3AV_CMD_AV_DITHER_OFF				0x0000
+#define PS3AV_CMD_AV_DITHER_ON				0x0001
+#define PS3AV_CMD_AV_DITHER_8BIT			0x0000
+#define PS3AV_CMD_AV_DITHER_10BIT			0x0002
+#define PS3AV_CMD_AV_DITHER_12BIT			0x0004
+/* super_white */
+#define PS3AV_CMD_AV_SUPER_WHITE_OFF			0x0000
+#define PS3AV_CMD_AV_SUPER_WHITE_ON			0x0001
+/* aspect */
+#define PS3AV_CMD_AV_ASPECT_16_9			0x0000
+#define PS3AV_CMD_AV_ASPECT_4_3				0x0001
+/* video_cs_cnv() */
+#define PS3AV_CMD_VIDEO_CS_RGB				0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV422			0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV444			0x0003
+
+/* for broadcast automode */
+#define PS3AV_RESBIT_720x480P			0x0003	/* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_720x576P			0x0003	/* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_1280x720P			0x0004
+#define PS3AV_RESBIT_1920x1080I			0x0008
+#define PS3AV_RESBIT_1920x1080P			0x4000
+#define PS3AV_RES_MASK_60			(PS3AV_RESBIT_720x480P \
+						| PS3AV_RESBIT_1280x720P \
+						| PS3AV_RESBIT_1920x1080I \
+						| PS3AV_RESBIT_1920x1080P)
+#define PS3AV_RES_MASK_50			(PS3AV_RESBIT_720x576P \
+						| PS3AV_RESBIT_1280x720P \
+						| PS3AV_RESBIT_1920x1080I \
+						| PS3AV_RESBIT_1920x1080P)
+
+/* for VESA automode */
+#define PS3AV_RESBIT_VGA			0x0001
+#define PS3AV_RESBIT_WXGA			0x0002
+#define PS3AV_RESBIT_SXGA			0x0004
+#define PS3AV_RESBIT_WUXGA			0x0008
+#define PS3AV_RES_MASK_VESA			(PS3AV_RESBIT_WXGA |\
+						 PS3AV_RESBIT_SXGA |\
+						 PS3AV_RESBIT_WUXGA)
+
+#define PS3AV_MONITOR_TYPE_HDMI			1	/* HDMI */
+#define PS3AV_MONITOR_TYPE_DVI			2	/* DVI */
+
+
+/* for video mode */
+enum ps3av_mode_num {
+	PS3AV_MODE_AUTO				= 0,
+	PS3AV_MODE_480I				= 1,
+	PS3AV_MODE_480P				= 2,
+	PS3AV_MODE_720P60			= 3,
+	PS3AV_MODE_1080I60			= 4,
+	PS3AV_MODE_1080P60			= 5,
+	PS3AV_MODE_576I				= 6,
+	PS3AV_MODE_576P				= 7,
+	PS3AV_MODE_720P50			= 8,
+	PS3AV_MODE_1080I50			= 9,
+	PS3AV_MODE_1080P50			= 10,
+	PS3AV_MODE_WXGA				= 11,
+	PS3AV_MODE_SXGA				= 12,
+	PS3AV_MODE_WUXGA			= 13,
+};
+
+#define PS3AV_MODE_MASK				0x000F
+#define PS3AV_MODE_HDCP_OFF			0x1000	/* Retail PS3 products don't support this */
+#define PS3AV_MODE_DITHER			0x0800
+#define PS3AV_MODE_COLOR			0x0400
+#define PS3AV_MODE_WHITE			0x0200
+#define PS3AV_MODE_FULL				0x0080
+#define PS3AV_MODE_DVI				0x0040
+#define PS3AV_MODE_RGB				0x0020
+
+
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60	PS3AV_MODE_480P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60	PS3AV_MODE_480I
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50	PS3AV_MODE_576P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50	PS3AV_MODE_576I
+
+#define PS3AV_REGION_60				0x01
+#define PS3AV_REGION_50				0x02
+#define PS3AV_REGION_RGB			0x10
+
+#define get_status(buf)				(((__u32 *)(buf))[2])
+#define PS3AV_HDR_SIZE				4	/* version + size */
+
+
+/** command packet structure **/
+struct ps3av_send_hdr {
+	u16 version;
+	u16 size;		/* size of command packet */
+	u32 cid;		/* command id */
+};
+
+struct ps3av_reply_hdr {
+	u16 version;
+	u16 size;
+	u32 cid;
+	u32 status;
+};
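+
+/*
+ * Note (illustrative): get_status() above reads 32-bit word 2 of a raw
+ * reply buffer. With the layout of struct ps3av_reply_hdr, version and
+ * size pack into word 0 and cid is word 1, so word 2 is the status
+ * field:
+ *
+ *	struct ps3av_reply_hdr *hdr = recv_buf;
+ *	u32 status = get_status(recv_buf);	equals hdr->status
+ */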
+
+/* backend: initialization */
+struct ps3av_pkt_av_init {
+	struct ps3av_send_hdr send_hdr;
+	u32 event_bit;
+};
+
+/* backend: finalize */
+struct ps3av_pkt_av_fin {
+	struct ps3av_send_hdr send_hdr;
+	/* recv */
+	u32 reserved;
+};
+
+/* backend: get port */
+struct ps3av_pkt_av_get_hw_conf {
+	struct ps3av_send_hdr send_hdr;
+	/* recv */
+	u32 status;
+	u16 num_of_hdmi;	/* out: number of hdmi */
+	u16 num_of_avmulti;	/* out: number of avmulti */
+	u16 num_of_spdif;	/* out: number of spdif */
+	u16 reserved;
+};
+
+/* backend: get monitor info */
+struct ps3av_info_resolution {
+	u32 res_bits;
+	u32 native;
+};
+
+struct ps3av_info_cs {
+	u8 rgb;
+	u8 yuv444;
+	u8 yuv422;
+	u8 reserved;
+};
+
+struct ps3av_info_color {
+	u16 red_x;
+	u16 red_y;
+	u16 green_x;
+	u16 green_y;
+	u16 blue_x;
+	u16 blue_y;
+	u16 white_x;
+	u16 white_y;
+	u32 gamma;
+};
+
+struct ps3av_info_audio {
+	u8 type;
+	u8 max_num_of_ch;
+	u8 fs;
+	u8 sbit;
+};
+
+struct ps3av_info_monitor {
+	u8 avport;
+	u8 monitor_id[10];
+	u8 monitor_type;
+	u8 monitor_name[16];
+	struct ps3av_info_resolution res_60;
+	struct ps3av_info_resolution res_50;
+	struct ps3av_info_resolution res_other;
+	struct ps3av_info_resolution res_vesa;
+	struct ps3av_info_cs cs;
+	struct ps3av_info_color color;
+	u8 supported_ai;
+	u8 speaker_info;
+	u8 num_of_audio_block;
+	struct ps3av_info_audio audio[0];	/* 0 or more audio blocks */
+	u8 reserved[169];
+} __attribute__ ((packed));
+
+struct ps3av_pkt_av_get_monitor_info {
+	struct ps3av_send_hdr send_hdr;
+	u16 avport;		/* in: avport */
+	u16 reserved;
+	/* recv */
+	struct ps3av_info_monitor info;	/* out: monitor info */
+};
+
+/* backend: enable/disable event */
+struct ps3av_pkt_av_event {
+	struct ps3av_send_hdr send_hdr;
+	u32 event_bit;		/* in */
+};
+
+/* backend: video cs param */
+struct ps3av_pkt_av_video_cs {
+	struct ps3av_send_hdr send_hdr;
+	u16 avport;		/* in: avport */
+	u16 av_vid;		/* in: video resolution */
+	u16 av_cs_out;		/* in: output color space */
+	u16 av_cs_in;		/* in: input color space */
+	u8 dither;		/* in: dither bit length */
+	u8 bitlen_out;		/* in: bit length */
+	u8 super_white;		/* in: super white */
+	u8 aspect;		/* in: aspect ratio */
+};
+
+/* backend: video mute */
+struct ps3av_av_mute {
+	u16 avport;		/* in: avport */
+	u16 mute;		/* in: mute on/off */
+};
+
+struct ps3av_pkt_av_video_mute {
+	struct ps3av_send_hdr send_hdr;
+	struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: video disable signal */
+struct ps3av_pkt_av_video_disable_sig {
+	struct ps3av_send_hdr send_hdr;
+	u16 avport;		/* in: avport */
+	u16 reserved;
+};
+
+/* backend: audio param */
+struct ps3av_audio_info_frame {
+	struct pb1_bit {
+		u8 ct:4;
+		u8 rsv:1;
+		u8 cc:3;
+	} pb1;
+	struct pb2_bit {
+		u8 rsv:3;
+		u8 sf:3;
+		u8 ss:2;
+	} pb2;
+	u8 pb3;
+	u8 pb4;
+	struct pb5_bit {
+		u8 dm:1;
+		u8 lsv:4;
+		u8 rsv:3;
+	} pb5;
+};
+
+struct ps3av_pkt_av_audio_param {
+	struct ps3av_send_hdr send_hdr;
+	u16 avport;		/* in: avport */
+	u16 reserved;
+	u8 mclk;		/* in: audio mclk */
+	u8 ns[3];		/* in: audio ns val */
+	u8 enable;		/* in: audio enable */
+	u8 swaplr;		/* in: audio swap */
+	u8 fifomap;		/* in: audio fifomap */
+	u8 inputctrl;		/* in: audio input ctrl */
+	u8 inputlen;		/* in: sample bit size */
+	u8 layout;		/* in: speaker layout param */
+	struct ps3av_audio_info_frame info;	/* in: info */
+	u8 chstat[5];		/* in: ch stat */
+};
+
+/* backend: audio_mute */
+struct ps3av_pkt_av_audio_mute {
+	struct ps3av_send_hdr send_hdr;
+	struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: hdmi_mode */
+struct ps3av_pkt_av_hdmi_mode {
+	struct ps3av_send_hdr send_hdr;
+	u8 mode;		/* in: hdmi_mode */
+	u8 reserved0;
+	u8 reserved1;
+	u8 reserved2;
+};
+
+/* backend: tv_mute */
+struct ps3av_pkt_av_tv_mute {
+	struct ps3av_send_hdr send_hdr;
+	u16 avport;		/* in: avport HDMI only */
+	u16 mute;		/* in: mute */
+};
+
+/* video: initialize */
+struct ps3av_pkt_video_init {
+	struct ps3av_send_hdr send_hdr;
+	/* recv */
+	u32 reserved;
+};
+
+/* video: mode setting */
+struct ps3av_pkt_video_mode {
+	struct ps3av_send_hdr send_hdr;
+	u32 video_head;		/* in: head */
+	u32 reserved;
+	u32 video_vid;		/* in: video resolution */
+	u16 reserved1;
+	u16 width;		/* in: width in pixel */
+	u16 reserved2;
+	u16 height;		/* in: height in pixel */
+	u32 pitch;		/* in: line size in byte */
+	u32 video_out_format;	/* in: out format */
+	u32 video_format;	/* in: input frame buffer format */
+	u8 reserved3;
+	u8 video_cl_cnv;	/* in: color conversion */
+	u16 video_order;	/* in: input RGB order */
+	u32 reserved4;
+};
+
+/* video: format */
+struct ps3av_pkt_video_format {
+	struct ps3av_send_hdr send_hdr;
+	u32 video_head;		/* in: head */
+	u32 video_format;	/* in: frame buffer format */
+	u8 reserved;
+	u8 video_cl_cnv;	/* in: color conversion */
+	u16 video_order;	/* in: input RGB order */
+};
+
+/* video: pitch */
+struct ps3av_pkt_video_pitch {
+	u16 version;
+	u16 size;		/* size of command packet */
+	u32 cid;		/* command id */
+	u32 video_head;		/* in: head */
+	u32 pitch;		/* in: line size in byte */
+};
+
+/* audio: initialize */
+struct ps3av_pkt_audio_init {
+	struct ps3av_send_hdr send_hdr;
+	/* recv */
+	u32 reserved;
+};
+
+/* audio: mode setting */
+struct ps3av_pkt_audio_mode {
+	struct ps3av_send_hdr send_hdr;
+	u8 avport;		/* in: avport */
+	u8 reserved0[3];
+	u32 mask;		/* in: mask */
+	u32 audio_num_of_ch;	/* in: number of ch */
+	u32 audio_fs;		/* in: sampling freq */
+	u32 audio_word_bits;	/* in: sample bit size */
+	u32 audio_format;	/* in: audio output format */
+	u32 audio_source;	/* in: audio source */
+	u8 audio_enable[4];	/* in: audio enable */
+	u8 audio_swap[4];	/* in: audio swap */
+	u8 audio_map[4];	/* in: audio map */
+	u32 audio_layout;	/* in: speaker layout */
+	u32 audio_downmix;	/* in: audio downmix permission */
+	u32 audio_downmix_level;
+	u8 audio_cs_info[8];	/* in: IEC channel status */
+};
+
+/* audio: mute */
+struct ps3av_audio_mute {
+	u8 avport;		/* in: opt_port optical */
+	u8 reserved[3];
+	u32 mute;		/* in: mute */
+};
+
+struct ps3av_pkt_audio_mute {
+	struct ps3av_send_hdr send_hdr;
+	struct ps3av_audio_mute mute[PS3AV_OPT_PORT_MAX];
+};
+
+/* audio: active/inactive */
+struct ps3av_pkt_audio_active {
+	struct ps3av_send_hdr send_hdr;
+	u32 audio_port;		/* in: audio active/inactive port */
+};
+
+/* audio: SPDIF user bit */
+struct ps3av_pkt_audio_spdif_bit {
+	u16 version;
+	u16 size;		/* size of command packet */
+	u32 cid;		/* command id */
+	u8 avport;		/* in: avport SPDIF only */
+	u8 reserved[3];
+	u32 audio_port;		/* in: SPDIF only */
+	u32 spdif_bit_data[12];	/* in: user bit data */
+};
+
+/* audio: audio control */
+struct ps3av_pkt_audio_ctrl {
+	u16 version;
+	u16 size;		/* size of command packet */
+	u32 cid;		/* command id */
+	u32 audio_ctrl_id;	/* in: control id */
+	u32 audio_ctrl_data[4];	/* in: control data */
+};
+
+/* avb:param */
+#define PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE	\
+	(PS3AV_AVB_NUM_VIDEO*sizeof(struct ps3av_pkt_video_mode) + \
+	 PS3AV_AVB_NUM_AUDIO*sizeof(struct ps3av_pkt_audio_mode) + \
+	 PS3AV_AVB_NUM_AV_VIDEO*sizeof(struct ps3av_pkt_av_video_cs) + \
+	 PS3AV_AVB_NUM_AV_AUDIO*sizeof(struct ps3av_pkt_av_audio_param))
+
+struct ps3av_pkt_avb_param {
+	struct ps3av_send_hdr send_hdr;
+	u16 num_of_video_pkt;
+	u16 num_of_audio_pkt;
+	u16 num_of_av_video_pkt;
+	u16 num_of_av_audio_pkt;
+	/*
+	 * The actual buffer layout depends on the fields above:
+	 *
+	 * struct ps3av_pkt_video_mode video[num_of_video_pkt];
+	 * struct ps3av_pkt_audio_mode audio[num_of_audio_pkt];
+	 * struct ps3av_pkt_av_video_cs av_video[num_of_av_video_pkt];
+	 * struct ps3av_pkt_av_audio_param av_audio[num_of_av_audio_pkt];
+	 */
+	u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
+};
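+
+/*
+ * Illustrative sketch (not part of the interface): a caller building an
+ * avb param packet packs the variable part of buf[] in the order given
+ * above, e.g. for one video packet followed by one audio packet
+ * (video_mode and audio_mode are assumed locals):
+ *
+ *	struct ps3av_pkt_avb_param avb;
+ *	u8 *p = avb.buf;
+ *
+ *	avb.num_of_video_pkt = 1;
+ *	avb.num_of_audio_pkt = 1;
+ *	avb.num_of_av_video_pkt = 0;
+ *	avb.num_of_av_audio_pkt = 0;
+ *	memcpy(p, &video_mode, sizeof(struct ps3av_pkt_video_mode));
+ *	p += sizeof(struct ps3av_pkt_video_mode);
+ *	memcpy(p, &audio_mode, sizeof(struct ps3av_pkt_audio_mode));
+ */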
+
+/* channel status */
+extern u8 ps3av_mode_cs_info[];
+
+/** command status **/
+#define PS3AV_STATUS_SUCCESS			0x0000	/* success */
+#define PS3AV_STATUS_RECEIVE_VUART_ERROR	0x0001	/* receive vuart error */
+#define PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL	0x0002	/* syscon communication error */
+#define PS3AV_STATUS_INVALID_COMMAND		0x0003	/* invalid command id (obsolete) */
+#define PS3AV_STATUS_INVALID_PORT		0x0004	/* invalid port number */
+#define PS3AV_STATUS_INVALID_VID		0x0005	/* invalid video format */
+#define PS3AV_STATUS_INVALID_COLOR_SPACE	0x0006	/* invalid video color space */
+#define PS3AV_STATUS_INVALID_FS			0x0007	/* invalid audio sampling freq */
+#define PS3AV_STATUS_INVALID_AUDIO_CH		0x0008	/* invalid audio channel number */
+#define PS3AV_STATUS_UNSUPPORTED_VERSION	0x0009	/* version mismatch  */
+#define PS3AV_STATUS_INVALID_SAMPLE_SIZE	0x000a	/* invalid audio sample bit size */
+#define PS3AV_STATUS_FAILURE			0x000b	/* other failures */
+#define PS3AV_STATUS_UNSUPPORTED_COMMAND	0x000c	/* unsupported cid */
+#define PS3AV_STATUS_BUFFER_OVERFLOW		0x000d	/* write buffer overflow */
+#define PS3AV_STATUS_INVALID_VIDEO_PARAM	0x000e	/* invalid video param */
+#define PS3AV_STATUS_NO_SEL			0x000f	/* selector does not exist */
+#define PS3AV_STATUS_INVALID_AV_PARAM		0x0010	/* invalid backend param */
+#define PS3AV_STATUS_INVALID_AUDIO_PARAM	0x0011	/* invalid audio param */
+#define PS3AV_STATUS_UNSUPPORTED_HDMI_MODE	0x0012	/* unsupported hdmi mode */
+#define PS3AV_STATUS_NO_SYNC_HEAD		0x0013	/* sync head failed */
+
+extern void ps3av_set_hdr(u32, u16, struct ps3av_send_hdr *);
+extern int ps3av_do_pkt(u32, u16, size_t, struct ps3av_send_hdr *);
+
+extern int ps3av_cmd_init(void);
+extern int ps3av_cmd_fin(void);
+extern int ps3av_cmd_av_video_mute(int, u32 *, u32);
+extern int ps3av_cmd_av_video_disable_sig(u32);
+extern int ps3av_cmd_av_tv_mute(u32, u32);
+extern int ps3av_cmd_enable_event(void);
+extern int ps3av_cmd_av_hdmi_mode(u8);
+extern u32 ps3av_cmd_set_av_video_cs(void *, u32, int, int, int, u32);
+extern u32 ps3av_cmd_set_video_mode(void *, u32, int, int, u32);
+extern int ps3av_cmd_video_format_black(u32, u32, u32);
+extern int ps3av_cmd_av_audio_mute(int, u32 *, u32);
+extern u32 ps3av_cmd_set_av_audio_param(void *, u32,
+					const struct ps3av_pkt_audio_mode *,
+					u32);
+extern void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *, u32, u32,
+				     u32, u32, u32, u32);
+extern int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *);
+extern int ps3av_cmd_audio_mute(int, u32 *, u32);
+extern int ps3av_cmd_audio_active(int, u32);
+extern int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *, u32);
+extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *);
+extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
+					    u32);
+
+extern int ps3av_set_video_mode(int);
+extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
+extern int ps3av_get_auto_mode(void);
+extern int ps3av_get_mode(void);
+extern int ps3av_video_mode2res(u32, u32 *, u32 *);
+extern int ps3av_video_mute(int);
+extern int ps3av_audio_mute(int);
+extern int ps3av_audio_mute_analog(int);
+extern int ps3av_dev_open(void);
+extern int ps3av_dev_close(void);
+#endif	/* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h
new file mode 100644
index 0000000..b2b8959
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3gpu.h
@@ -0,0 +1,86 @@
+/*
+ *  PS3 GPU declarations.
+ *
+ *  Copyright 2009 Sony Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.
+ *  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_POWERPC_PS3GPU_H
+#define _ASM_POWERPC_PS3GPU_H
+
+#include <linux/mutex.h>
+
+#include <asm/lv1call.h>
+
+
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC	0x101
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP	0x102
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP	0x600
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT		0x601
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC	0x602
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE	0x603
+
+#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION	(1ULL << 32)
+
+#define L1GPU_DISPLAY_SYNC_HSYNC		1
+#define L1GPU_DISPLAY_SYNC_VSYNC		2
+
+
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
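+
+/*
+ * Hypothetical usage sketch: callers serialize GPU calls against video
+ * mode changes by holding the mutex, e.g.:
+ *
+ *	mutex_lock(&ps3_gpu_mutex);
+ *	error = lv1_gpu_display_flip(context_handle, head, ddr_offset);
+ *	mutex_unlock(&ps3_gpu_mutex);
+ */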
+
+
+static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
+				       u64 ddr_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+					 head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
+				       u64 ddr_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
+					 head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar,
+				   u64 xdr_size, u64 ioif_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
+					 xdr_lpar, xdr_size, ioif_offset, 0);
+}
+
+static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset,
+				  u64 ioif_offset, u64 sync_width, u64 pitch)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					 ddr_offset, ioif_offset, sync_width,
+					 pitch);
+}
+
+static inline int lv1_gpu_fb_close(u64 context_handle)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0,
+					 0, 0, 0);
+}
+
+#endif /* _ASM_POWERPC_PS3GPU_H */
diff --git a/arch/powerpc/include/asm/ps3stor.h b/arch/powerpc/include/asm/ps3stor.h
new file mode 100644
index 0000000..6fcaf71
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3stor.h
@@ -0,0 +1,71 @@
+/*
+ * PS3 Storage Devices
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _ASM_POWERPC_PS3STOR_H_
+#define _ASM_POWERPC_PS3STOR_H_
+
+#include <linux/interrupt.h>
+
+#include <asm/ps3.h>
+
+
+struct ps3_storage_region {
+	unsigned int id;
+	u64 start;
+	u64 size;
+};
+
+struct ps3_storage_device {
+	struct ps3_system_bus_device sbd;
+
+	struct ps3_dma_region dma_region;
+	unsigned int irq;
+	u64 blk_size;
+
+	u64 tag;
+	u64 lv1_status;
+	struct completion done;
+
+	unsigned long bounce_size;
+	void *bounce_buf;
+	u64 bounce_lpar;
+	dma_addr_t bounce_dma;
+
+	unsigned int num_regions;
+	unsigned long accessible_regions;
+	unsigned int region_idx;		/* first accessible region */
+	struct ps3_storage_region regions[0];	/* Must be last */
+};
+
+static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
+{
+	return container_of(dev, struct ps3_storage_device, sbd.core);
+}
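+
+/*
+ * Illustrative sketch (assumed driver code, not part of this header):
+ * accessible_regions is a bitmap indexed by region number, so a driver
+ * might walk the usable regions like this (use_region() is a
+ * hypothetical helper):
+ *
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < dev->num_regions; i++)
+ *		if (test_bit(i, &dev->accessible_regions))
+ *			use_region(&dev->regions[i]);
+ */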
+
+extern int ps3stor_setup(struct ps3_storage_device *dev,
+			 irq_handler_t handler);
+extern void ps3stor_teardown(struct ps3_storage_device *dev);
+extern u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+				      u64 start_sector, u64 sectors,
+				      int write);
+extern u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd,
+				u64 arg1, u64 arg2, u64 arg3, u64 arg4);
+
+#endif /* _ASM_POWERPC_PS3STOR_H_ */
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
new file mode 100644
index 0000000..bef5614
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Included from asm/pgtable-*.h only ! */
+
+/*
+ * Some bits are only used on some cpu families... Make sure that all
+ * the undefined gets a sensible default
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE	0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE	0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC	0
+#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
+#ifndef _PAGE_WRITETHRU
+#define _PAGE_WRITETHRU	0
+#endif
+#ifndef _PAGE_4K_PFN
+#define _PAGE_4K_PFN		0
+#endif
+#ifndef _PAGE_SAO
+#define _PAGE_SAO	0
+#endif
+#ifndef _PAGE_PSIZE
+#define _PAGE_PSIZE		0
+#endif
+/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */
+#ifndef _PAGE_RO
+#define _PAGE_RO 0
+#else
+#define _PAGE_RW 0
+#endif
+
+#ifndef _PAGE_PTE
+#define _PAGE_PTE 0
+#endif
+/* At least one of _PAGE_PRIVILEGED or _PAGE_USER must be defined */
+#ifndef _PAGE_PRIVILEGED
+#define _PAGE_PRIVILEGED 0
+#else
+#ifndef _PAGE_USER
+#define _PAGE_USER 0
+#endif
+#endif
+#ifndef _PAGE_NA
+#define _PAGE_NA 0
+#endif
+#ifndef _PAGE_HUGE
+#define _PAGE_HUGE 0
+#endif
+
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK	_PMD_PRESENT
+#endif
+#ifndef _PMD_USER
+#define _PMD_USER	0
+#endif
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_RO)
+#endif
+#ifndef _PAGE_KERNEL_ROX
+#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC)
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+				 _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_KERNEL_RWX
+#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+				 _PAGE_HWWRITE | _PAGE_EXEC)
+#endif
+#ifndef _PAGE_HPTEFLAGS
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK	_PAGE_HPTEFLAGS
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER.  Need to explicitly match _PAGE_BAP_UR bit in that case too.
+ */
+static inline bool pte_user(pte_t pte)
+{
+	return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/* Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * value as PAGE_SHIFT here (i.e., naturally aligned).
+ * Platforms that use a different value pre-define it, so we don't
+ * override it here.
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+#endif
+
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
+#else
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+#endif
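+
+/*
+ * For illustration: with the two definitions above, the physical page
+ * frame number is recovered from a PTE value as
+ *
+ *	pfn = (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
+ */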
+
+/* _PAGE_CHG_MASK masks the bits that are preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/* Mask of bits returned by pte_pgprot() */
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
+			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | _PAGE_NA | \
+			 _PAGE_PRIVILEGED | \
+			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (i.e.,
+ * cacheable kernel and user pages) and one for non-cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or when
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
+	defined(CONFIG_PPC_E500MC)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE	(_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_NA)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+				 _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_X
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY_X
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_X
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED_X
+#define __S111	PAGE_SHARED_X
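+
+/*
+ * Illustration: generic mm code indexes these tables with the XWR bits
+ * of vm_flags, so e.g. a private VM_READ|VM_WRITE mapping resolves to
+ * __P011, i.e. PAGE_COPY (read/write via copy-on-write, no execute).
+ */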
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+#ifndef _PAGE_READ
+/* if not defined, we should not find _PAGE_WRITE either */
+#define _PAGE_READ 0
+#define _PAGE_WRITE _PAGE_RW
+#endif
+
+#ifndef H_PAGE_4K_PFN
+#define H_PAGE_4K_PFN 0
+#endif
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
new file mode 100644
index 0000000..2d633e9
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+			       bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+				    bool *is_thp, unsigned *hshift)
+{
+	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+	pgd_t *pgdir = init_mm.pgd;
+	return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+					 bool *is_thp, unsigned *hshift)
+{
+	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+	VM_WARN(pgdir != current->mm->pgd,
+		"%s lock less page table lookup called on wrong mm\n", __func__);
+	return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
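+
+/*
+ * Usage sketch (assumed caller): the lockless walk is only safe while
+ * interrupts are disabled, so a typical caller looks like:
+ *
+ *	local_irq_save(flags);
+ *	ptep = find_current_mm_pte(mm->pgd, ea, &is_thp, &hshift);
+ *	if (ptep)
+ *		pte = READ_ONCE(*ptep);
+ *	local_irq_restore(flags);
+ */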
+
+#endif /* _ASM_POWERPC_PTE_WALK_H */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
new file mode 100644
index 0000000..447cbd1
--- /dev/null
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * This should only contain volatile regs, since we can keep the
+ * non-volatile regs in the thread_struct; the interrupt entry code
+ * should set this up when only the volatiles are saved.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below.  This simplifies arch/powerpc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/asm-const.h>
+
+
+#ifdef __powerpc64__
+
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE	512
+#define KERNEL_REDZONE_SIZE	288
+
+#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */
+#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x7265677368657265)
+#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + \
+				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
+#define STACK_FRAME_MARKER	12
+
+#ifdef PPC64_ELF_ABI_v2
+#define STACK_FRAME_MIN_SIZE	32
+#else
+#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
+#endif
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE	128
+#define __SIGNAL_FRAMESIZE32	64
+
+#else /* __powerpc64__ */
+
+#define USER_REDZONE_SIZE	0
+#define KERNEL_REDZONE_SIZE	0
+#define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
+#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
+#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
+#define STACK_FRAME_MARKER	2
+#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE	64
+
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+#define GET_IP(regs)		((regs)->nip)
+#define GET_USP(regs)		((regs)->gpr[1])
+#define GET_FP(regs)		(0)
+#define SET_FP(regs, val)
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+#endif
+
+#include <asm-generic/ptrace.h>
+
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
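+
+/*
+ * The powerpc syscall ABI flags an error return by setting the summary
+ * overflow (SO) bit of CR0 in the saved CCR; 0x10000000 below is that
+ * bit within the CCR image.
+ */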
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+	return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	if (is_syscall_success(regs))
+		return regs->gpr[3];
+	else
+		return -regs->gpr[3];
+}
+
+#ifdef __powerpc64__
+#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
+
+#define force_successful_syscall_return()   \
+	do { \
+		set_thread_flag(TIF_NOERROR); \
+	} while(0)
+
+struct task_struct;
+extern int ptrace_get_reg(struct task_struct *task, int regno,
+			  unsigned long *data);
+extern int ptrace_put_reg(struct task_struct *task, int regno,
+			  unsigned long data);
+
+#define current_pt_regs() \
+	((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+/*
+ * We use the least-significant bit of the trap field to indicate
+ * whether we have saved the full set of registers, or only a
+ * partial set.  A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
+#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
+#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
+#endif /* ! __powerpc64__ */
+#define TRAP(regs)		((regs)->trap & ~0xF)
+#ifdef __powerpc64__
+#define NV_REG_POISON		0xdeadbeefdeadbeefUL
+#define CHECK_FULL_REGS(regs)	BUG_ON(regs->trap & 1)
+#else
+#define NV_REG_POISON		0xdeadbeef
+#define CHECK_FULL_REGS(regs)						      \
+do {									      \
+	if ((regs)->trap & 1)						      \
+		printk(KERN_CRIT "%s: partial register set\n", __func__); \
+} while (0)
+#endif /* __powerpc64__ */
+
+#define arch_has_single_step()	(1)
+#define arch_has_block_step()	(!cpu_has_feature(CPU_FTR_601))
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:	pt_regs from which the register value is read
+ * @offset:	byte offset of the register within struct pt_regs.
+ *
+ * regs_get_register() returns the value of the register stored at @offset
+ * from the start of @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+						unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
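+
+/*
+ * Illustrative pairing (register names come from the arch's register
+ * table, e.g. "nip"):
+ *
+ *	int off = regs_query_register_offset("nip");
+ *
+ *	if (off >= 0)
+ *		val = regs_get_register(regs, off);
+ */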
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel
+ * stack page(s): it returns true if so, false otherwise.
+ */
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+						unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:	pt_regs which contains kernel stack pointer.
+ * @n:		stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+						      unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __powerpc64__
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
+#define PT_VR0_32 164	/* each Vector reg occupies 4 slots in 32-bit */
+#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
+#define PT_VRSAVE_32 (PT_VR0 + 33*4)
+#define PT_VSR0_32 300 	/* each VSR reg occupies 4 slots in 32-bit */
+#endif /* __powerpc64__ */
+#endif /* _ASM_POWERPC_PTRACE_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
new file mode 100644
index 0000000..e5b314e
--- /dev/null
+++ b/arch/powerpc/include/asm/reg.h
@@ -0,0 +1,1443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains the definition of registers common to all PowerPC variants.
+ * If a register definition has been changed in a different PowerPC
+ * variant, we guard it with #ifndef XXX ... #endif, and use here the
+ * number given in the Programming Environments Manual For 32-Bit
+ * Implementations of the PowerPC Architecture (a.k.a. Green Book).
+ */
+
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
+#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <asm/cputable.h>
+#include <asm/asm-const.h>
+#include <asm/feature-fixups.h>
+
+/* Pickup Book E specific registers. */
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#include <asm/reg_booke.h>
+#endif /* CONFIG_BOOKE || CONFIG_40x */
+
+#ifdef CONFIG_FSL_EMB_PERFMON
+#include <asm/reg_fsl_emb.h>
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#include <asm/reg_8xx.h>
+#endif /* CONFIG_PPC_8xx */
+
+#define MSR_SF_LG	63              /* Enable 64 bit mode */
+#define MSR_ISF_LG	61              /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG 	60              /* Hypervisor state */
+#define MSR_TS_T_LG	34		/* Trans Mem state: Transactional */
+#define MSR_TS_S_LG	33		/* Trans Mem state: Suspended */
+#define MSR_TS_LG	33		/* Trans Mem state (2 bits) */
+#define MSR_TM_LG	32		/* Trans Mem Available */
+#define MSR_VEC_LG	25	        /* Enable AltiVec */
+#define MSR_VSX_LG	23		/* Enable VSX */
+#define MSR_POW_LG	18		/* Enable Power Management */
+#define MSR_WE_LG	18		/* Wait State Enable */
+#define MSR_TGPR_LG	17		/* TLB Update registers in use */
+#define MSR_CE_LG	17		/* Critical Interrupt Enable */
+#define MSR_ILE_LG	16		/* Interrupt Little Endian */
+#define MSR_EE_LG	15		/* External Interrupt Enable */
+#define MSR_PR_LG	14		/* Problem State / Privilege Level */
+#define MSR_FP_LG	13		/* Floating Point enable */
+#define MSR_ME_LG	12		/* Machine Check Enable */
+#define MSR_FE0_LG	11		/* Floating Exception mode 0 */
+#define MSR_SE_LG	10		/* Single Step */
+#define MSR_BE_LG	9		/* Branch Trace */
+#define MSR_DE_LG	9 		/* Debug Exception Enable */
+#define MSR_FE1_LG	8		/* Floating Exception mode 1 */
+#define MSR_IP_LG	6		/* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG	5 		/* Instruction Relocate */
+#define MSR_DR_LG	4 		/* Data Relocate */
+#define MSR_PE_LG	3		/* Protection Enable */
+#define MSR_PX_LG	2		/* Protection Exclusive Mode */
+#define MSR_PMM_LG	2		/* Performance monitor */
+#define MSR_RI_LG	1		/* Recoverable Exception */
+#define MSR_LE_LG	0 		/* Little Endian */
+
+#ifdef __ASSEMBLY__
+#define __MASK(X)	(1<<(X))
+#else
+#define __MASK(X)	(1UL<<(X))
+#endif
+
+#ifdef CONFIG_PPC64
+#define MSR_SF		__MASK(MSR_SF_LG)	/* Enable 64 bit mode */
+#define MSR_ISF		__MASK(MSR_ISF_LG)	/* Interrupt 64b mode valid on 630 */
+#define MSR_HV 		__MASK(MSR_HV_LG)	/* Hypervisor state */
+#else
+/* so tests for these bits fail on 32-bit */
+#define MSR_SF		0
+#define MSR_ISF		0
+#define MSR_HV		0
+#endif
+
+/*
+ * Defined so that code shared between Book E and Book S need not
+ * worry about which variant it is compiled for.
+ */
+#ifndef MSR_SPE
+#define MSR_SPE 	0
+#endif
+
+#define MSR_VEC		__MASK(MSR_VEC_LG)	/* Enable AltiVec */
+#define MSR_VSX		__MASK(MSR_VSX_LG)	/* Enable VSX */
+#define MSR_POW		__MASK(MSR_POW_LG)	/* Enable Power Management */
+#define MSR_WE		__MASK(MSR_WE_LG)	/* Wait State Enable */
+#define MSR_TGPR	__MASK(MSR_TGPR_LG)	/* TLB Update registers in use */
+#define MSR_CE		__MASK(MSR_CE_LG)	/* Critical Interrupt Enable */
+#define MSR_ILE		__MASK(MSR_ILE_LG)	/* Interrupt Little Endian */
+#define MSR_EE		__MASK(MSR_EE_LG)	/* External Interrupt Enable */
+#define MSR_PR		__MASK(MSR_PR_LG)	/* Problem State / Privilege Level */
+#define MSR_FP		__MASK(MSR_FP_LG)	/* Floating Point enable */
+#define MSR_ME		__MASK(MSR_ME_LG)	/* Machine Check Enable */
+#define MSR_FE0		__MASK(MSR_FE0_LG)	/* Floating Exception mode 0 */
+#define MSR_SE		__MASK(MSR_SE_LG)	/* Single Step */
+#define MSR_BE		__MASK(MSR_BE_LG)	/* Branch Trace */
+#define MSR_DE		__MASK(MSR_DE_LG)	/* Debug Exception Enable */
+#define MSR_FE1		__MASK(MSR_FE1_LG)	/* Floating Exception mode 1 */
+#define MSR_IP		__MASK(MSR_IP_LG)	/* Exception prefix 0x000/0xFFF */
+#define MSR_IR		__MASK(MSR_IR_LG)	/* Instruction Relocate */
+#define MSR_DR		__MASK(MSR_DR_LG)	/* Data Relocate */
+#define MSR_PE		__MASK(MSR_PE_LG)	/* Protection Enable */
+#define MSR_PX		__MASK(MSR_PX_LG)	/* Protection Exclusive Mode */
+#ifndef MSR_PMM
+#define MSR_PMM		__MASK(MSR_PMM_LG)	/* Performance monitor */
+#endif
+#define MSR_RI		__MASK(MSR_RI_LG)	/* Recoverable Exception */
+#define MSR_LE		__MASK(MSR_LE_LG)	/* Little Endian */
+
+#define MSR_TM		__MASK(MSR_TM_LG)	/* Transactional Mem Available */
+#define MSR_TS_N	0			/*  Non-transactional */
+#define MSR_TS_S	__MASK(MSR_TS_S_LG)	/*  Transaction Suspended */
+#define MSR_TS_T	__MASK(MSR_TS_T_LG)	/*  Transaction Transactional */
+#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+#define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
+#define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
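+
+/*
+ * For illustration: given a saved MSR image, MSR_TM_ACTIVE(regs->msr)
+ * is the usual test for "interrupted inside or while suspended from a
+ * transaction", since it checks for either TS state.
+ */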
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+#define MSR_64BIT	MSR_SF
+
+/* Server variant */
+#define __MSR		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_		__MSR
+#define MSR_IDLE	(MSR_ME | MSR_SF | MSR_HV)
+#else
+#define MSR_		(__MSR | MSR_LE)
+#define MSR_IDLE	(MSR_ME | MSR_SF | MSR_HV | MSR_LE)
+#endif
+#define MSR_KERNEL	(MSR_ | MSR_64BIT)
+#define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64	(MSR_USER32 | MSR_64BIT)
+#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+/* Default MSR for kernel mode. */
+#define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR)
+#define MSR_USER	(MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
+
+#ifndef MSR_64BIT
+#define MSR_64BIT	0
+#endif
+
+/* Condition Register related */
+#define CR0_SHIFT	28
+#define CR0_MASK	0xF
+#define CR0_TBEGIN_FAILURE	(0x2 << 28) /* 0b0010 */
+
+
+/* Power Management - Processor Stop Status and Control Register Fields */
+#define PSSCR_RL_MASK		0x0000000F /* Requested Level */
+#define PSSCR_MTL_MASK		0x000000F0 /* Maximum Transition Level */
+#define PSSCR_TR_MASK		0x00000300 /* Transition State */
+#define PSSCR_PSLL_MASK		0x000F0000 /* Power-Saving Level Limit */
+#define PSSCR_EC		0x00100000 /* Exit Criterion */
+#define PSSCR_ESL		0x00200000 /* Enable State Loss */
+#define PSSCR_SD		0x00400000 /* Status Disable */
+#define PSSCR_PLS	0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_GUEST_VIS	0xf0000000000003ffUL /* Guest-visible PSSCR fields */
+#define PSSCR_FAKE_SUSPEND	0x00000400 /* Fake-suspend bit (P9 DD2.2) */
+#define PSSCR_FAKE_SUSPEND_LG	10	   /* Fake-suspend bit position */
+
+/* Floating Point Status and Control Register (FPSCR) Fields */
+#define FPSCR_FX	0x80000000	/* FPU exception summary */
+#define FPSCR_FEX	0x40000000	/* FPU enabled exception summary */
+#define FPSCR_VX	0x20000000	/* Invalid operation summary */
+#define FPSCR_OX	0x10000000	/* Overflow exception summary */
+#define FPSCR_UX	0x08000000	/* Underflow exception summary */
+#define FPSCR_ZX	0x04000000	/* Zero-divide exception summary */
+#define FPSCR_XX	0x02000000	/* Inexact exception summary */
+#define FPSCR_VXSNAN	0x01000000	/* Invalid op for SNaN */
+#define FPSCR_VXISI	0x00800000	/* Invalid op for Inv - Inv */
+#define FPSCR_VXIDI	0x00400000	/* Invalid op for Inv / Inv */
+#define FPSCR_VXZDZ	0x00200000	/* Invalid op for Zero / Zero */
+#define FPSCR_VXIMZ	0x00100000	/* Invalid op for Inv * Zero */
+#define FPSCR_VXVC	0x00080000	/* Invalid op for Compare */
+#define FPSCR_FR	0x00040000	/* Fraction rounded */
+#define FPSCR_FI	0x00020000	/* Fraction inexact */
+#define FPSCR_FPRF	0x0001f000	/* FPU Result Flags */
+#define FPSCR_FPCC	0x0000f000	/* FPU Condition Codes */
+#define FPSCR_VXSOFT	0x00000400	/* Invalid op for software request */
+#define FPSCR_VXSQRT	0x00000200	/* Invalid op for square root */
+#define FPSCR_VXCVI	0x00000100	/* Invalid op for integer convert */
+#define FPSCR_VE	0x00000080	/* Invalid op exception enable */
+#define FPSCR_OE	0x00000040	/* IEEE overflow exception enable */
+#define FPSCR_UE	0x00000020	/* IEEE underflow exception enable */
+#define FPSCR_ZE	0x00000010	/* IEEE zero divide exception enable */
+#define FPSCR_XE	0x00000008	/* FP inexact exception enable */
+#define FPSCR_NI	0x00000004	/* FPU non IEEE-Mode */
+#define FPSCR_RN	0x00000003	/* FPU rounding control */
+
+/* Bit definitions for SPEFSCR. */
+#define SPEFSCR_SOVH	0x80000000	/* Summary integer overflow high */
+#define SPEFSCR_OVH	0x40000000	/* Integer overflow high */
+#define SPEFSCR_FGH	0x20000000	/* Embedded FP guard bit high */
+#define SPEFSCR_FXH	0x10000000	/* Embedded FP sticky bit high */
+#define SPEFSCR_FINVH	0x08000000	/* Embedded FP invalid operation high */
+#define SPEFSCR_FDBZH	0x04000000	/* Embedded FP div by zero high */
+#define SPEFSCR_FUNFH	0x02000000	/* Embedded FP underflow high */
+#define SPEFSCR_FOVFH	0x01000000	/* Embedded FP overflow high */
+#define SPEFSCR_FINXS	0x00200000	/* Embedded FP inexact sticky */
+#define SPEFSCR_FINVS	0x00100000	/* Embedded FP invalid op. sticky */
+#define SPEFSCR_FDBZS	0x00080000	/* Embedded FP div by zero sticky */
+#define SPEFSCR_FUNFS	0x00040000	/* Embedded FP underflow sticky */
+#define SPEFSCR_FOVFS	0x00020000	/* Embedded FP overflow sticky */
+#define SPEFSCR_MODE	0x00010000	/* Embedded FP mode */
+#define SPEFSCR_SOV	0x00008000	/* Integer summary overflow */
+#define SPEFSCR_OV	0x00004000	/* Integer overflow */
+#define SPEFSCR_FG	0x00002000	/* Embedded FP guard bit */
+#define SPEFSCR_FX	0x00001000	/* Embedded FP sticky bit */
+#define SPEFSCR_FINV	0x00000800	/* Embedded FP invalid operation */
+#define SPEFSCR_FDBZ	0x00000400	/* Embedded FP div by zero */
+#define SPEFSCR_FUNF	0x00000200	/* Embedded FP underflow */
+#define SPEFSCR_FOVF	0x00000100	/* Embedded FP overflow */
+#define SPEFSCR_FINXE	0x00000040	/* Embedded FP inexact enable */
+#define SPEFSCR_FINVE	0x00000020	/* Embedded FP invalid op. enable */
+#define SPEFSCR_FDBZE	0x00000010	/* Embedded FP div by zero enable */
+#define SPEFSCR_FUNFE	0x00000008	/* Embedded FP underflow enable */
+#define SPEFSCR_FOVFE	0x00000004	/* Embedded FP overflow enable */
+#define SPEFSCR_FRMC 	0x00000003	/* Embedded FP rounding mode control */
+
+/* Special Purpose Registers (SPRNs)*/
+
+#ifdef CONFIG_40x
+#define SPRN_PID	0x3B1	/* Process ID */
+#else
+#define SPRN_PID	0x030	/* Process ID */
+#ifdef CONFIG_BOOKE
+#define SPRN_PID0	SPRN_PID/* Process ID Register 0 */
+#endif
+#endif
+
+#define SPRN_CTR	0x009	/* Count Register */
+#define SPRN_DSCR	0x11
+#define SPRN_CFAR	0x1c	/* Come From Address Register */
+#define SPRN_AMR	0x1d	/* Authority Mask Register */
+#define SPRN_UAMOR	0x9d	/* User Authority Mask Override Register */
+#define SPRN_AMOR	0x15d	/* Authority Mask Override Register */
+#define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
+#define SPRN_TFIAR	0x81	/* Transaction Failure Inst Addr   */
+#define SPRN_TEXASR	0x82	/* Transaction EXception & Summary */
+#define SPRN_TEXASRU	0x83	/* Transaction EXception & Summary, Upper 32 bits */
+
+#define TEXASR_FC_LG	(63 - 7)	/* Failure Code */
+#define TEXASR_AB_LG	(63 - 31)	/* Abort */
+#define TEXASR_SU_LG	(63 - 32)	/* Suspend */
+#define TEXASR_HV_LG	(63 - 34)	/* Hypervisor state */
+#define TEXASR_PR_LG	(63 - 35)	/* Privilege level */
+#define TEXASR_FS_LG	(63 - 36)	/* failure summary */
+#define TEXASR_EX_LG	(63 - 37)	/* TFIAR exact bit */
+#define TEXASR_ROT_LG	(63 - 38)	/* ROT bit */
+
+#define   TEXASR_ABORT	__MASK(TEXASR_AB_LG) /* terminated by tabort or treclaim */
+#define   TEXASR_SUSP	__MASK(TEXASR_SU_LG) /* tx failed in suspended state */
+#define   TEXASR_HV	__MASK(TEXASR_HV_LG) /* MSR[HV] when failure occurred */
+#define   TEXASR_PR	__MASK(TEXASR_PR_LG) /* MSR[PR] when failure occurred */
+#define   TEXASR_FS	__MASK(TEXASR_FS_LG) /* TEXASR Failure Summary */
+#define   TEXASR_EXACT	__MASK(TEXASR_EX_LG) /* TFIAR value is exact */
+#define   TEXASR_ROT	__MASK(TEXASR_ROT_LG)
+#define   TEXASR_FC	(ASM_CONST(0xFF) << TEXASR_FC_LG)
+
+#define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
+
+#define SPRN_TIDR	144	/* Thread ID register */
+#define SPRN_CTRLF	0x088
+#define SPRN_CTRLT	0x098
+#define   CTRL_CT	0xc0000000	/* current thread */
+#define   CTRL_CT0	0x80000000	/* thread 0 */
+#define   CTRL_CT1	0x40000000	/* thread 1 */
+#define   CTRL_TE	0x00c00000	/* thread enable */
+#define   CTRL_RUNLATCH	0x1
+#define SPRN_DAWR	0xB4
+#define SPRN_RPR	0xBA	/* Relative Priority Register */
+#define SPRN_CIABR	0xBB
+#define   CIABR_PRIV		0x3
+#define   CIABR_PRIV_USER	1
+#define   CIABR_PRIV_SUPER	2
+#define   CIABR_PRIV_HYPER	3
+#define SPRN_DAWRX	0xBC
+#define   DAWRX_USER	__MASK(0)
+#define   DAWRX_KERNEL	__MASK(1)
+#define   DAWRX_HYP	__MASK(2)
+#define   DAWRX_WTI	__MASK(3)
+#define   DAWRX_WT	__MASK(4)
+#define   DAWRX_DR	__MASK(5)
+#define   DAWRX_DW	__MASK(6)
+#define SPRN_DABR	0x3F5	/* Data Address Breakpoint Register */
+#define SPRN_DABR2	0x13D	/* e300 */
+#define SPRN_DABRX	0x3F7	/* Data Address Breakpoint Register Extension */
+#define   DABRX_USER	__MASK(0)
+#define   DABRX_KERNEL	__MASK(1)
+#define   DABRX_HYP	__MASK(2)
+#define   DABRX_BTI	__MASK(3)
+#define   DABRX_ALL     (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
+#define SPRN_DAR	0x013	/* Data Address Register */
+#define SPRN_DBCR	0x136	/* e300 Data Breakpoint Control Reg */
+#define SPRN_DSISR	0x012	/* Data Storage Interrupt Status Register */
+#define   DSISR_BAD_DIRECT_ST	0x80000000 /* Obsolete: Direct store error */
+#define   DSISR_NOHPTE		0x40000000 /* no translation found */
+#define   DSISR_ATTR_CONFLICT	0x20000000 /* P9: Process vs. Partition attr */
+#define   DSISR_NOEXEC_OR_G	0x10000000 /* Alias of SRR1 bit, see below */
+#define   DSISR_PROTFAULT	0x08000000 /* protection fault */
+#define   DSISR_BADACCESS	0x04000000 /* bad access to CI or G */
+#define   DSISR_ISSTORE		0x02000000 /* access was a store */
+#define   DSISR_DABRMATCH	0x00400000 /* hit data breakpoint */
+#define   DSISR_NOSEGMENT	0x00200000 /* STAB miss (unsupported) */
+#define   DSISR_KEYFAULT	0x00200000 /* Storage Key fault */
+#define   DSISR_BAD_EXT_CTRL	0x00100000 /* Obsolete: External ctrl error */
+#define   DSISR_UNSUPP_MMU	0x00080000 /* P9: Unsupported MMU config */
+#define   DSISR_SET_RC		0x00040000 /* P9: Failed setting of R/C bits */
+#define   DSISR_PRTABLE_FAULT   0x00020000 /* P9: Fault on process table */
+#define   DSISR_ICSWX_NO_CT     0x00004000 /* P7: icswx unavailable cp type */
+#define   DSISR_BAD_COPYPASTE   0x00000008 /* P9: Copy/Paste on wrong memtype */
+#define   DSISR_BAD_AMO		0x00000004 /* P9: Incorrect AMO opcode */
+#define   DSISR_BAD_CI_LDST	0x00000002 /* P8: Bad HV CI load/store */
+
+/*
+ * DSISR_NOEXEC_OR_G doesn't actually exist. This bit is always
+ * 0 on DSIs. However, on ISIs, the corresponding bit in SRR1
+ * indicates an attempt at executing from a no-execute PTE
+ * or segment or from a guarded page.
+ *
+ * We add a definition here for completeness as we alias
+ * DSISR and SRR1 in do_page_fault.
+ */
+
+/*
+ * DSISR bits that are treated as a fault. Any bit set
+ * here will skip hash_page, and cause do_page_fault to
+ * trigger a SIGBUS or SIGSEGV:
+ */
+#define   DSISR_BAD_FAULT_32S	(DSISR_BAD_DIRECT_ST	| \
+				 DSISR_BADACCESS	| \
+				 DSISR_BAD_EXT_CTRL)
+#define	  DSISR_BAD_FAULT_64S	(DSISR_BAD_FAULT_32S	| \
+				 DSISR_ATTR_CONFLICT	| \
+				 DSISR_UNSUPP_MMU	| \
+				 DSISR_PRTABLE_FAULT	| \
+				 DSISR_ICSWX_NO_CT	| \
+				 DSISR_BAD_COPYPASTE	| \
+				 DSISR_BAD_AMO		| \
+				 DSISR_BAD_CI_LDST)
+/*
+ * These bits are equivalent in SRR1 and DSISR for 0x400
+ * instruction access interrupts on Book3S
+ */
+#define   DSISR_SRR1_MATCH_32S	(DSISR_NOHPTE		| \
+				 DSISR_NOEXEC_OR_G	| \
+				 DSISR_PROTFAULT)
+#define   DSISR_SRR1_MATCH_64S	(DSISR_SRR1_MATCH_32S	| \
+				 DSISR_KEYFAULT		| \
+				 DSISR_UNSUPP_MMU	| \
+				 DSISR_SET_RC		| \
+				 DSISR_PRTABLE_FAULT)
+
+#define SPRN_TBRL	0x10C	/* Time Base Read Lower Register (user, R/O) */
+#define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
+#define SPRN_CIR	0x11B	/* Chip Information Register (hyper, R/O) */
+#define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
+#define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
+#define SPRN_SPURR	0x134	/* Scaled PURR */
+#define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
+#define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */
+#define SPRN_HDSISR     0x132
+#define SPRN_HDAR       0x133
+#define SPRN_HDEC	0x136	/* Hypervisor Decrementer */
+#define SPRN_HIOR	0x137	/* 970 Hypervisor interrupt offset */
+#define SPRN_RMOR	0x138	/* Real mode offset register */
+#define SPRN_HRMOR	0x139	/* Real mode offset register */
+#define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+#define SPRN_ASDR	0x330	/* Access segment descriptor register */
+#define SPRN_IC		0x350	/* Virtual Instruction Count */
+#define SPRN_VTB	0x351	/* Virtual Time Base */
+#define SPRN_LDBAR	0x352	/* LD Base Address Register */
+#define SPRN_PMICR	0x354   /* Power Management Idle Control Reg */
+#define SPRN_PMSR	0x355   /* Power Management Status Reg */
+#define SPRN_PMMAR	0x356	/* Power Management Memory Activity Register */
+#define SPRN_PSSCR	0x357	/* Processor Stop Status and Control Register (ISA 3.0) */
+#define SPRN_PSSCR_PR	0x337	/* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_PMCR	0x374	/* Power Management Control Register */
+#define SPRN_RWMR	0x375	/* Region-Weighting Mode Register */
+
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_SCV_LG	12	/* Enable System Call Vectored */
+#define FSCR_MSGP_LG	10	/* Enable MSGP */
+#define FSCR_TAR_LG	8	/* Enable Target Address Register */
+#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
+#define FSCR_TM_LG	5	/* Enable Transactional Memory */
+#define FSCR_BHRB_LG	4	/* Enable Branch History Rolling Buffer*/
+#define FSCR_PM_LG	3	/* Enable prob/priv access to PMU SPRs */
+#define FSCR_DSCR_LG	2	/* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG	1	/* Enable VMX/VSX  */
+#define FSCR_FP_LG	0	/* Enable Floating Point */
+#define SPRN_FSCR	0x099	/* Facility Status & Control Register */
+#define   FSCR_SCV	__MASK(FSCR_SCV_LG)
+#define   FSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   FSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
+#define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
+#define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   HFSCR_TM	__MASK(FSCR_TM_LG)
+#define   HFSCR_PM	__MASK(FSCR_PM_LG)
+#define   HFSCR_BHRB	__MASK(FSCR_BHRB_LG)
+#define   HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define   HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
+#define   HFSCR_FP	__MASK(FSCR_FP_LG)
+#define SPRN_TAR	0x32f	/* Target Address Register */
+#define SPRN_LPCR	0x13E	/* LPAR Control Register */
+#define   LPCR_VPM0		ASM_CONST(0x8000000000000000)
+#define   LPCR_VPM1		ASM_CONST(0x4000000000000000)
+#define   LPCR_ISL		ASM_CONST(0x2000000000000000)
+#define   LPCR_VC_SH		61
+#define   LPCR_DPFD_SH		52
+#define   LPCR_DPFD		(ASM_CONST(7) << LPCR_DPFD_SH)
+#define   LPCR_VRMASD_SH	47
+#define   LPCR_VRMASD		(ASM_CONST(0x1f) << LPCR_VRMASD_SH)
+#define   LPCR_VRMA_L		ASM_CONST(0x0008000000000000)
+#define   LPCR_VRMA_LP0		ASM_CONST(0x0001000000000000)
+#define   LPCR_VRMA_LP1		ASM_CONST(0x0000800000000000)
+#define   LPCR_RMLS		0x1C000000	/* Implementation dependent RMO limit sel */
+#define   LPCR_RMLS_SH		26
+#define   LPCR_ILE		ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
+#define   LPCR_AIL		ASM_CONST(0x0000000001800000)	/* Alternate interrupt location */
+#define   LPCR_AIL_0		ASM_CONST(0x0000000000000000)	/* MMU off exception offset 0x0 */
+#define   LPCR_AIL_3		ASM_CONST(0x0000000001800000)   /* MMU on exception offset 0xc00...4xxx */
+#define   LPCR_ONL		ASM_CONST(0x0000000000040000)	/* online - PURR/SPURR count */
+#define   LPCR_LD		ASM_CONST(0x0000000000020000)	/* large decrementer */
+#define   LPCR_PECE		ASM_CONST(0x000000000001f000)	/* powersave exit cause enable */
+#define     LPCR_PECEDP	ASM_CONST(0x0000000000010000)	/* directed priv dbells cause exit */
+#define     LPCR_PECEDH	ASM_CONST(0x0000000000008000)	/* directed hyp dbells cause exit */
+#define     LPCR_PECE0		ASM_CONST(0x0000000000004000)	/* ext. exceptions can cause exit */
+#define     LPCR_PECE1		ASM_CONST(0x0000000000002000)	/* decrementer can cause exit */
+#define     LPCR_PECE2		ASM_CONST(0x0000000000001000)	/* machine check etc can cause exit */
+#define     LPCR_PECE_HVEE	ASM_CONST(0x0000400000000000)	/* P9 Wakeup on HV interrupts */
+#define   LPCR_MER		ASM_CONST(0x0000000000000800)	/* Mediated External Exception */
+#define   LPCR_MER_SH		11
+#define	  LPCR_GTSE		ASM_CONST(0x0000000000000400)  	/* Guest Translation Shootdown Enable */
+#define   LPCR_TC		ASM_CONST(0x0000000000000200)	/* Translation control */
+#define   LPCR_HEIC		ASM_CONST(0x0000000000000010)   /* Hypervisor External Interrupt Control */
+#define   LPCR_LPES		0x0000000c
+#define   LPCR_LPES0		ASM_CONST(0x0000000000000008)      /* LPAR Env selector 0 */
+#define   LPCR_LPES1		ASM_CONST(0x0000000000000004)      /* LPAR Env selector 1 */
+#define   LPCR_LPES_SH		2
+#define   LPCR_RMI		ASM_CONST(0x0000000000000002)      /* real mode is cache inhibit */
+#define   LPCR_HVICE		ASM_CONST(0x0000000000000002)      /* P9: HV interrupt enable */
+#define   LPCR_HDICE		ASM_CONST(0x0000000000000001)      /* Hyp Decr enable (HV,PR,EE) */
+#define   LPCR_UPRT		ASM_CONST(0x0000000000400000)      /* Use Process Table (ISA 3) */
+#define   LPCR_HR		ASM_CONST(0x0000000000100000)
+#ifndef SPRN_LPID
+#define SPRN_LPID	0x13F	/* Logical Partition Identifier */
+#endif
+#define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
+#define	SPRN_HMER	0x150	/* Hypervisor maintenance exception reg */
+#define   HMER_DEBUG_TRIG	(1ul << (63 - 17)) /* Debug trigger */
+#define	SPRN_HMEER	0x151	/* Hyp maintenance exception enable reg */
+#define SPRN_PCR	0x152	/* Processor compatibility register */
+#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_TM_DIS	(1ul << (63-2))	/* Trans. memory disable (POWER8) */
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify and
+ * determine both the compatibility level which we want to emulate and the
+ * compatibility level which the host is capable of emulating.
+ */
+#define   PCR_ARCH_207	0x8		/* Architecture 2.07 */
+#define   PCR_ARCH_206	0x4		/* Architecture 2.06 */
+#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
+#define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
+#define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
+#define SPRN_TLBVPNR	0x155	/* P7 TLB control register */
+#define SPRN_TLBRPNR	0x156	/* P7 TLB control register */
+#define SPRN_TLBLPIDR	0x157	/* P7 TLB control register */
+#define SPRN_DBAT0L	0x219	/* Data BAT 0 Lower Register */
+#define SPRN_DBAT0U	0x218	/* Data BAT 0 Upper Register */
+#define SPRN_DBAT1L	0x21B	/* Data BAT 1 Lower Register */
+#define SPRN_DBAT1U	0x21A	/* Data BAT 1 Upper Register */
+#define SPRN_DBAT2L	0x21D	/* Data BAT 2 Lower Register */
+#define SPRN_DBAT2U	0x21C	/* Data BAT 2 Upper Register */
+#define SPRN_DBAT3L	0x21F	/* Data BAT 3 Lower Register */
+#define SPRN_DBAT3U	0x21E	/* Data BAT 3 Upper Register */
+#define SPRN_DBAT4L	0x239	/* Data BAT 4 Lower Register */
+#define SPRN_DBAT4U	0x238	/* Data BAT 4 Upper Register */
+#define SPRN_DBAT5L	0x23B	/* Data BAT 5 Lower Register */
+#define SPRN_DBAT5U	0x23A	/* Data BAT 5 Upper Register */
+#define SPRN_DBAT6L	0x23D	/* Data BAT 6 Lower Register */
+#define SPRN_DBAT6U	0x23C	/* Data BAT 6 Upper Register */
+#define SPRN_DBAT7L	0x23F	/* Data BAT 7 Lower Register */
+#define SPRN_DBAT7U	0x23E	/* Data BAT 7 Upper Register */
+#define SPRN_PPR	0x380	/* SMT Thread status Register */
+#define SPRN_TSCR	0x399	/* Thread Switch Control Register */
+
+#define SPRN_DEC	0x016		/* Decrement Register */
+#define SPRN_DER	0x095		/* Debug Enable Register */
+#define DER_RSTE	0x40000000	/* Reset Interrupt */
+#define DER_CHSTPE	0x20000000	/* Check Stop */
+#define DER_MCIE	0x10000000	/* Machine Check Interrupt */
+#define DER_EXTIE	0x02000000	/* External Interrupt */
+#define DER_ALIE	0x01000000	/* Alignment Interrupt */
+#define DER_PRIE	0x00800000	/* Program Interrupt */
+#define DER_FPUVIE	0x00400000	/* FP Unavailable Interrupt */
+#define DER_DECIE	0x00200000	/* Decrementer Interrupt */
+#define DER_SYSIE	0x00040000	/* System Call Interrupt */
+#define DER_TRE		0x00020000	/* Trace Interrupt */
+#define DER_SEIE	0x00004000	/* FP SW Emulation Interrupt */
+#define DER_ITLBMSE	0x00002000	/* Imp. Spec. Instruction TLB Miss */
+#define DER_ITLBERE	0x00001000	/* Imp. Spec. Instruction TLB Error */
+#define DER_DTLBMSE	0x00000800	/* Imp. Spec. Data TLB Miss */
+#define DER_DTLBERE	0x00000400	/* Imp. Spec. Data TLB Error */
+#define DER_LBRKE	0x00000008	/* Load/Store Breakpoint Interrupt */
+#define DER_IBRKE	0x00000004	/* Instruction Breakpoint Interrupt */
+#define DER_EBRKE	0x00000002	/* External Breakpoint Interrupt */
+#define DER_DPIE	0x00000001	/* Dev. Port Nonmaskable Request */
+#define SPRN_DMISS	0x3D0		/* Data TLB Miss Register */
+#define SPRN_DHDES	0x0B1		/* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES	0x0B0		/* Directed Priv. Doorbell Exc. State */
+#define SPRN_EAR	0x11A		/* External Address Register */
+#define SPRN_HASH1	0x3D2		/* Primary Hash Address Register */
+#define SPRN_HASH2	0x3D3		/* Secondary Hash Address Register */
+#define SPRN_HID0	0x3F0		/* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH	(63 - 23)	/* 970 HDEC interrupt enable */
+#define HID0_EMCP	(1<<31)		/* Enable Machine Check pin */
+#define HID0_EBA	(1<<29)		/* Enable Bus Address Parity */
+#define HID0_EBD	(1<<28)		/* Enable Bus Data Parity */
+#define HID0_SBCLK	(1<<27)
+#define HID0_EICE	(1<<26)
+#define HID0_TBEN	(1<<26)		/* Timebase enable - 745x */
+#define HID0_ECLK	(1<<25)
+#define HID0_PAR	(1<<24)
+#define HID0_STEN	(1<<24)		/* Software table search enable - 745x */
+#define HID0_HIGH_BAT	(1<<23)		/* Enable high BATs - 7455 */
+#define HID0_DOZE	(1<<23)
+#define HID0_NAP	(1<<22)
+#define HID0_SLEEP	(1<<21)
+#define HID0_DPM	(1<<20)
+#define HID0_BHTCLR	(1<<18)		/* Clear branch history table - 7450 */
+#define HID0_XAEN	(1<<17)		/* Extended addressing enable - 7450 */
+#define HID0_NHR	(1<<16)		/* Not hard reset (software bit-7450)*/
+#define HID0_ICE	(1<<15)		/* Instruction Cache Enable */
+#define HID0_DCE	(1<<14)		/* Data Cache Enable */
+#define HID0_ILOCK	(1<<13)		/* Instruction Cache Lock */
+#define HID0_DLOCK	(1<<12)		/* Data Cache Lock */
+#define HID0_ICFI	(1<<11)		/* Instr. Cache Flash Invalidate */
+#define HID0_DCI	(1<<10)		/* Data Cache Invalidate */
+#define HID0_SPD	(1<<9)		/* Speculative disable */
+#define HID0_DAPUEN	(1<<8)		/* Debug APU enable */
+#define HID0_SGE	(1<<7)		/* Store Gathering Enable */
+#define HID0_SIED	(1<<7)		/* Serial Instr. Execution [Disable] */
+#define HID0_DCFA	(1<<6)		/* Data Cache Flush Assist */
+#define HID0_LRSTK	(1<<4)		/* Link register stack - 745x */
+#define HID0_BTIC	(1<<5)		/* Branch Target Instr Cache Enable */
+#define HID0_ABE	(1<<3)		/* Address Broadcast Enable */
+#define HID0_FOLD	(1<<3)		/* Branch Folding enable - 745x */
+#define HID0_BHTE	(1<<2)		/* Branch History Table Enable */
+#define HID0_BTCD	(1<<1)		/* Branch target cache disable */
+#define HID0_NOPDST	(1<<1)		/* No-op dst, dstt, etc. instr. */
+#define HID0_NOPTI	(1<<0)		/* No-op dcbt and dcbst instr. */
+/* POWER8 HID0 bits */
+#define HID0_POWER8_4LPARMODE	__MASK(61)
+#define HID0_POWER8_2LPARMODE	__MASK(57)
+#define HID0_POWER8_1TO2LPAR	__MASK(52)
+#define HID0_POWER8_1TO4LPAR	__MASK(51)
+#define HID0_POWER8_DYNLPARDIS	__MASK(48)
+
+/* POWER9 HID0 bits */
+#define HID0_POWER9_RADIX	__MASK(63 - 8)
+
+#define SPRN_HID1	0x3F1		/* Hardware Implementation Register 1 */
+#ifdef CONFIG_6xx
+#define HID1_EMCP	(1<<31)		/* 7450 Machine Check Pin Enable */
+#define HID1_DFS	(1<<22)		/* 7447A Dynamic Frequency Scaling */
+#define HID1_PC0	(1<<16)		/* 7450 PLL_CFG[0] */
+#define HID1_PC1	(1<<15)		/* 7450 PLL_CFG[1] */
+#define HID1_PC2	(1<<14)		/* 7450 PLL_CFG[2] */
+#define HID1_PC3	(1<<13)		/* 7450 PLL_CFG[3] */
+#define HID1_SYNCBE	(1<<11)		/* 7450 ABE for sync, eieio */
+#define HID1_ABE	(1<<10)		/* 7450 Address Broadcast Enable */
+#define HID1_PS		(1<<16)		/* 750FX PLL selection */
+#endif
+#define SPRN_HID2	0x3F8		/* Hardware Implementation Register 2 */
+#define SPRN_HID2_GEKKO	0x398		/* Gekko HID2 Register */
+#define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
+#define SPRN_IABR2	0x3FA		/* 83xx */
+#define SPRN_IBCR	0x135		/* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR	0x03D		/* Instr. Authority Mask Reg */
+#define SPRN_HID4	0x3F4		/* 970 HID4 */
+#define  HID4_LPES0	 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
+#define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
+#define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
+#define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
+#define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
+#define	 HID4_LPID1_SH	 0		/* partition ID top 2 bits */
+#define SPRN_HID4_GEKKO	0x3F3		/* Gekko HID4 */
+#define SPRN_HID5	0x3F6		/* 970 HID5 */
+#define SPRN_HID6	0x3F9	/* BE HID 6 */
+#define   HID6_LB	(0x0F<<12) /* Concurrent Large Page Modes */
+#define   HID6_DLP	(1<<20)	/* Disable all large page modes (4K only) */
+#define SPRN_TSC_CELL	0x399	/* Thread switch control on Cell */
+#define   TSC_CELL_DEC_ENABLE_0	0x400000 /* Decrementer Interrupt */
+#define   TSC_CELL_DEC_ENABLE_1	0x200000 /* Decrementer Interrupt */
+#define   TSC_CELL_EE_ENABLE	0x100000 /* External Interrupt */
+#define   TSC_CELL_EE_BOOST	0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 	0x3FD	/* Thread switch control on others */
+#define SPRN_TST 	0x3FC	/* Thread switch timeout on others */
+#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
+#define SPRN_IAC1	0x3F4		/* Instruction Address Compare 1 */
+#define SPRN_IAC2	0x3F5		/* Instruction Address Compare 2 */
+#endif
+#define SPRN_IBAT0L	0x211		/* Instruction BAT 0 Lower Register */
+#define SPRN_IBAT0U	0x210		/* Instruction BAT 0 Upper Register */
+#define SPRN_IBAT1L	0x213		/* Instruction BAT 1 Lower Register */
+#define SPRN_IBAT1U	0x212		/* Instruction BAT 1 Upper Register */
+#define SPRN_IBAT2L	0x215		/* Instruction BAT 2 Lower Register */
+#define SPRN_IBAT2U	0x214		/* Instruction BAT 2 Upper Register */
+#define SPRN_IBAT3L	0x217		/* Instruction BAT 3 Lower Register */
+#define SPRN_IBAT3U	0x216		/* Instruction BAT 3 Upper Register */
+#define SPRN_IBAT4L	0x231		/* Instruction BAT 4 Lower Register */
+#define SPRN_IBAT4U	0x230		/* Instruction BAT 4 Upper Register */
+#define SPRN_IBAT5L	0x233		/* Instruction BAT 5 Lower Register */
+#define SPRN_IBAT5U	0x232		/* Instruction BAT 5 Upper Register */
+#define SPRN_IBAT6L	0x235		/* Instruction BAT 6 Lower Register */
+#define SPRN_IBAT6U	0x234		/* Instruction BAT 6 Upper Register */
+#define SPRN_IBAT7L	0x237		/* Instruction BAT 7 Lower Register */
+#define SPRN_IBAT7U	0x236		/* Instruction BAT 7 Upper Register */
+#define SPRN_ICMP	0x3D5		/* Instruction TLB Compare Register */
+#define SPRN_ICTC	0x3FB	/* Instruction Cache Throttling Control Reg */
+#ifndef SPRN_ICTRL
+#define SPRN_ICTRL	0x3F3	/* 1011 7450 icache and interrupt ctrl */
+#endif
+#define ICTRL_EICE	0x08000000	/* enable icache parity errs */
+#define ICTRL_EDC	0x04000000	/* enable dcache parity errs */
+#define ICTRL_EICP	0x00000100	/* enable icache par. check */
+#define SPRN_IMISS	0x3D4		/* Instruction TLB Miss Register */
+#define SPRN_IMMR	0x27E		/* Internal Memory Map Register */
+#define SPRN_L2CR	0x3F9		/* Level 2 Cache Control Register */
+#define SPRN_L2CR2	0x3f8
+#define L2CR_L2E		0x80000000	/* L2 enable */
+#define L2CR_L2PE		0x40000000	/* L2 parity enable */
+#define L2CR_L2SIZ_MASK		0x30000000	/* L2 size mask */
+#define L2CR_L2SIZ_256KB	0x10000000	/* L2 size 256KB */
+#define L2CR_L2SIZ_512KB	0x20000000	/* L2 size 512KB */
+#define L2CR_L2SIZ_1MB		0x30000000	/* L2 size 1MB */
+#define L2CR_L2CLK_MASK		0x0e000000	/* L2 clock mask */
+#define L2CR_L2CLK_DISABLED	0x00000000	/* L2 clock disabled */
+#define L2CR_L2CLK_DIV1		0x02000000	/* L2 clock / 1 */
+#define L2CR_L2CLK_DIV1_5	0x04000000	/* L2 clock / 1.5 */
+#define L2CR_L2CLK_DIV2		0x08000000	/* L2 clock / 2 */
+#define L2CR_L2CLK_DIV2_5	0x0a000000	/* L2 clock / 2.5 */
+#define L2CR_L2CLK_DIV3		0x0c000000	/* L2 clock / 3 */
+#define L2CR_L2RAM_MASK		0x01800000	/* L2 RAM type mask */
+#define L2CR_L2RAM_FLOW		0x00000000	/* L2 RAM flow through */
+#define L2CR_L2RAM_PIPE		0x01000000	/* L2 RAM pipelined */
+#define L2CR_L2RAM_PIPE_LW	0x01800000	/* L2 RAM pipelined latewr */
+#define L2CR_L2DO		0x00400000	/* L2 data only */
+#define L2CR_L2I		0x00200000	/* L2 global invalidate */
+#define L2CR_L2CTL		0x00100000	/* L2 RAM control */
+#define L2CR_L2WT		0x00080000	/* L2 write-through */
+#define L2CR_L2TS		0x00040000	/* L2 test support */
+#define L2CR_L2OH_MASK		0x00030000	/* L2 output hold mask */
+#define L2CR_L2OH_0_5		0x00000000	/* L2 output hold 0.5 ns */
+#define L2CR_L2OH_1_0		0x00010000	/* L2 output hold 1.0 ns */
+#define L2CR_L2SL		0x00008000	/* L2 DLL slow */
+#define L2CR_L2DF		0x00004000	/* L2 differential clock */
+#define L2CR_L2BYP		0x00002000	/* L2 DLL bypass */
+#define L2CR_L2IP		0x00000001	/* L2 GI in progress */
+#define L2CR_L2IO_745x		0x00100000	/* L2 instr. only (745x) */
+#define L2CR_L2DO_745x		0x00010000	/* L2 data only (745x) */
+#define L2CR_L2REP_745x		0x00001000	/* L2 repl. algorithm (745x) */
+#define L2CR_L2HWF_745x		0x00000800	/* L2 hardware flush (745x) */
+#define SPRN_L3CR		0x3FA	/* Level 3 Cache Control Register */
+#define L3CR_L3E		0x80000000	/* L3 enable */
+#define L3CR_L3PE		0x40000000	/* L3 data parity enable */
+#define L3CR_L3APE		0x20000000	/* L3 addr parity enable */
+#define L3CR_L3SIZ		0x10000000	/* L3 size */
+#define L3CR_L3CLKEN		0x08000000	/* L3 clock enable */
+#define L3CR_L3RES		0x04000000	/* L3 special reserved bit */
+#define L3CR_L3CLKDIV		0x03800000	/* L3 clock divisor */
+#define L3CR_L3IO		0x00400000	/* L3 instruction only */
+#define L3CR_L3SPO		0x00040000	/* L3 sample point override */
+#define L3CR_L3CKSP		0x00030000	/* L3 clock sample point */
+#define L3CR_L3PSP		0x0000e000	/* L3 P-clock sample point */
+#define L3CR_L3REP		0x00001000	/* L3 replacement algorithm */
+#define L3CR_L3HWF		0x00000800	/* L3 hardware flush */
+#define L3CR_L3I		0x00000400	/* L3 global invalidate */
+#define L3CR_L3RT		0x00000300	/* L3 SRAM type */
+#define L3CR_L3NIRCA		0x00000080	/* L3 non-integer ratio clock adj. */
+#define L3CR_L3DO		0x00000040	/* L3 data only mode */
+#define L3CR_PMEN		0x00000004	/* L3 private memory enable */
+#define L3CR_PMSIZ		0x00000001	/* L3 private memory size */
+
+#define SPRN_MSSCR0	0x3f6	/* Memory Subsystem Control Register 0 */
+#define SPRN_MSSSR0	0x3f7	/* Memory Subsystem Status Register 0 */
+#define SPRN_LDSTCR	0x3f8	/* Load/Store control register */
+#define SPRN_LDSTDB	0x3f4	/* */
+#define SPRN_LR		0x008	/* Link Register */
+#ifndef SPRN_PIR
+#define SPRN_PIR	0x3FF	/* Processor Identification Register */
+#endif
+#define SPRN_TIR	0x1BE	/* Thread Identification Register */
+#define SPRN_PTCR	0x1D0	/* Partition table control Register */
+#define SPRN_PSPB	0x09F	/* Problem State Priority Boost reg */
+#define SPRN_PTEHI	0x3D5	/* 981 7450 PTE HI word (S/W TLB load) */
+#define SPRN_PTELO	0x3D6	/* 982 7450 PTE LO word (S/W TLB load) */
+#define SPRN_PURR	0x135	/* Processor Utilization of Resources Reg */
+#define SPRN_PVR	0x11F	/* Processor Version Register */
+#define SPRN_RPA	0x3D6	/* Required Physical Address Register */
+#define SPRN_SDA	0x3BF	/* Sampled Data Address Register */
+#define SPRN_SDR1	0x019	/* MMU Hash Base Register */
+#define SPRN_ASR	0x118   /* Address Space Register */
+#define SPRN_SIA	0x3BB	/* Sampled Instruction Address Register */
+#define SPRN_SPRG0	0x110	/* Special Purpose Register General 0 */
+#define SPRN_SPRG1	0x111	/* Special Purpose Register General 1 */
+#define SPRN_SPRG2	0x112	/* Special Purpose Register General 2 */
+#define SPRN_SPRG3	0x113	/* Special Purpose Register General 3 */
+#define SPRN_USPRG3	0x103	/* SPRG3 userspace read */
+#define SPRN_SPRG4	0x114	/* Special Purpose Register General 4 */
+#define SPRN_USPRG4	0x104	/* SPRG4 userspace read */
+#define SPRN_SPRG5	0x115	/* Special Purpose Register General 5 */
+#define SPRN_USPRG5	0x105	/* SPRG5 userspace read */
+#define SPRN_SPRG6	0x116	/* Special Purpose Register General 6 */
+#define SPRN_USPRG6	0x106	/* SPRG6 userspace read */
+#define SPRN_SPRG7	0x117	/* Special Purpose Register General 7 */
+#define SPRN_USPRG7	0x107	/* SPRG7 userspace read */
+#define SPRN_SRR0	0x01A	/* Save/Restore Register 0 */
+#define SPRN_SRR1	0x01B	/* Save/Restore Register 1 */
+#define   SRR1_ISI_NOPT		0x40000000 /* ISI: Not found in hash */
+#define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
+#define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
+#define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
+#define   SRR1_WAKEMCE_RESVD	0x003c0000 /* Unused/reserved value used by MCE wakeup to indicate cause to idle wakeup handler */
+#define   SRR1_WAKESYSERR	0x00300000 /* System error */
+#define   SRR1_WAKEEE		0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
+#define   SRR1_WAKEMT		0x00280000 /* mtctrl */
+#define	  SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
+#define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
+#define   SRR1_WAKEDBELL	0x00140000 /* Privileged doorbell on P8 */
+#define   SRR1_WAKETHERM	0x00100000 /* Thermal management interrupt */
+#define	  SRR1_WAKERESET	0x00100000 /* System reset */
+#define   SRR1_WAKEHDBELL	0x000c0000 /* Hypervisor doorbell on P8 */
+#define	  SRR1_WAKESTATE	0x00030000 /* Powersave exit mask [46:47] */
+#define	  SRR1_WS_DEEPEST	0x00030000 /* Some resources not maintained,
+					  * may not be recoverable */
+#define	  SRR1_WS_DEEPER	0x00020000 /* Some resources not maintained */
+#define	  SRR1_WS_DEEP		0x00010000 /* All resources maintained */
+#define   SRR1_PROGTM		0x00200000 /* TM Bad Thing */
+#define   SRR1_PROGFPE		0x00100000 /* Floating Point Enabled */
+#define   SRR1_PROGILL		0x00080000 /* Illegal instruction */
+#define   SRR1_PROGPRIV		0x00040000 /* Privileged instruction */
+#define   SRR1_PROGTRAP		0x00020000 /* Trap */
+#define   SRR1_PROGADDR		0x00010000 /* SRR0 contains subsequent addr */
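+
+/*
+ * Usage sketch (illustrative only): on wakeup from a power-saving state,
+ * the wake reason can be decoded from SRR1 with the masks above, roughly:
+ *
+ *	unsigned long srr1 = mfspr(SPRN_SRR1);
+ *
+ *	if ((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEEE)
+ *		... handle the external interrupt ...
+ */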
+
+#define SPRN_HSRR0	0x13A	/* Save/Restore Register 0 */
+#define SPRN_HSRR1	0x13B	/* Save/Restore Register 1 */
+#define   HSRR1_DENORM		0x00100000 /* Denorm exception */
+
+#define SPRN_TBCTL	0x35f	/* PA6T Timebase control register */
+#define   TBCTL_FREEZE		0x0000000000000000ull /* Freeze all tbs */
+#define   TBCTL_RESTART		0x0000000100000000ull /* Restart all tbs */
+#define   TBCTL_UPDATE_UPPER	0x0000000200000000ull /* Set upper 32 bits */
+#define   TBCTL_UPDATE_LOWER	0x0000000300000000ull /* Set lower 32 bits */
+
+#ifndef SPRN_SVR
+#define SPRN_SVR	0x11E	/* System Version Register */
+#endif
+#define SPRN_THRM1	0x3FC		/* Thermal Management Register 1 */
+/* these bits were defined in inverted endian sense originally, ugh, confusing */
+#define THRM1_TIN	(1 << 31)
+#define THRM1_TIV	(1 << 30)
+#define THRM1_THRES(x)	((x&0x7f)<<23)
+#define THRM3_SITV(x)	((x&0x3fff)<<1)
+#define THRM1_TID	(1<<2)
+#define THRM1_TIE	(1<<1)
+#define THRM1_V		(1<<0)
+#define SPRN_THRM2	0x3FD		/* Thermal Management Register 2 */
+#define SPRN_THRM3	0x3FE		/* Thermal Management Register 3 */
+#define THRM3_E		(1<<0)
+#define SPRN_TLBMISS	0x3D4		/* 980 7450 TLB Miss Register */
+#define SPRN_UMMCR0	0x3A8	/* User Monitor Mode Control Register 0 */
+#define SPRN_UMMCR1	0x3AC	/* User Monitor Mode Control Register 1 */
+#define SPRN_UPMC1	0x3A9	/* User Performance Counter Register 1 */
+#define SPRN_UPMC2	0x3AA	/* User Performance Counter Register 2 */
+#define SPRN_UPMC3	0x3AD	/* User Performance Counter Register 3 */
+#define SPRN_UPMC4	0x3AE	/* User Performance Counter Register 4 */
+#define SPRN_USIA	0x3AB	/* User Sampled Instruction Address Register */
+#define SPRN_VRSAVE	0x100	/* Vector Register Save Register */
+#define SPRN_XER	0x001	/* Fixed Point Exception Register */
+
+#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
+#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
+#define SPRN_PMC1_GEKKO  0x3B9 /* Gekko Performance Monitor Control 1 */
+#define SPRN_PMC2_GEKKO  0x3BA /* Gekko Performance Monitor Control 2 */
+#define SPRN_PMC3_GEKKO  0x3BD /* Gekko Performance Monitor Control 3 */
+#define SPRN_PMC4_GEKKO  0x3BE /* Gekko Performance Monitor Control 4 */
+#define SPRN_WPAR_GEKKO  0x399 /* Gekko Write Pipe Address Register */
+
+#define SPRN_SCOMC	0x114	/* SCOM Access Control */
+#define SPRN_SCOMD	0x115	/* SCOM Access DATA */
+
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0	795
+#define   MMCR0_FC	0x80000000UL /* freeze counters */
+#define   MMCR0_FCS	0x40000000UL /* freeze in supervisor state */
+#define   MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define   MMCR0_FCP	0x20000000UL /* freeze in problem state */
+#define   MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define   MMCR0_FCM1	0x10000000UL /* freeze counters while MSR mark = 1 */
+#define   MMCR0_FCM0	0x08000000UL /* freeze counters while MSR mark = 0 */
+#define   MMCR0_PMXE	ASM_CONST(0x04000000) /* perf mon exception enable */
+#define   MMCR0_FCECE	ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
+#define   MMCR0_TBEE	0x00400000UL /* time base exception enable */
+#define   MMCR0_BHRBA	0x00200000UL /* BHRB Access allowed in userspace */
+#define   MMCR0_EBE	0x00100000UL /* Event based branch enable */
+#define   MMCR0_PMCC	0x000c0000UL /* PMC control */
+#define   MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
+#define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable */
+#define   MMCR0_PMCjCE	ASM_CONST(0x00004000) /* PMCj count enable */
+#define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
+#define   MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define   MMCR0_C56RUN	ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define   MMCR0_PMAO	ASM_CONST(0x00000080)
+#define   MMCR0_SHRFC	0x00000040UL /* Share freeze conditions between threads */
+#define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
+#define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
+#define   MMCR0_FCTA	0x00000004UL /* freeze counters in tags active mode */
+#define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
+#define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
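+
+/*
+ * Usage sketch (illustrative only): the simplest way to stop all counters
+ * while reprogramming the PMU is to set the freeze bit:
+ *
+ *	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
+ */
+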
+#define SPRN_MMCR1	798
+#define SPRN_MMCR2	785
+#define SPRN_UMMCR2	769
+#define SPRN_MMCRA	0x312
+#define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+#define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+#define   MMCRA_SDAR_ERAT_MISS   0x20000000UL
+#define   MMCRA_SIHV	0x10000000UL /* state of MSR HV when SIAR set */
+#define   MMCRA_SIPR	0x08000000UL /* state of MSR PR when SIAR set */
+#define   MMCRA_SLOT	0x07000000UL /* SLOT bits (37-39) */
+#define   MMCRA_SLOT_SHIFT	24
+#define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define   POWER6_MMCRA_SDSYNC 0x0000080000000000ULL	/* SDAR/SIAR synced */
+#define   POWER6_MMCRA_SIHV   0x0000040000000000ULL
+#define   POWER6_MMCRA_SIPR   0x0000020000000000ULL
+#define   POWER6_MMCRA_THRM	0x00000020UL
+#define   POWER6_MMCRA_OTHER	0x0000000EUL
+
+#define   POWER7P_MMCRA_SIAR_VALID 0x10000000	/* P7+ SIAR contents valid */
+#define   POWER7P_MMCRA_SDAR_VALID 0x08000000	/* P7+ SDAR contents valid */
+
+#define SPRN_MMCRH	316	/* Hypervisor monitor mode control register */
+#define SPRN_MMCRS	894	/* Supervisor monitor mode control register */
+#define SPRN_MMCRC	851	/* Core monitor mode control register */
+#define SPRN_EBBHR	804	/* Event based branch handler register */
+#define SPRN_EBBRR	805	/* Event based branch return register */
+#define SPRN_BESCR	806	/* Branch event status and control register */
+#define   BESCR_GE	0x8000000000000000ULL /* Global Enable */
+#define SPRN_WORT	895	/* Workload optimization register - thread */
+#define SPRN_WORC	863	/* Workload optimization register - core */
+
+#define SPRN_PMC1	787
+#define SPRN_PMC2	788
+#define SPRN_PMC3	789
+#define SPRN_PMC4	790
+#define SPRN_PMC5	791
+#define SPRN_PMC6	792
+#define SPRN_PMC7	793
+#define SPRN_PMC8	794
+#define SPRN_SIER	784
+#define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
+#define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
+#define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
+#define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
+#define SPRN_TACR	888
+#define SPRN_TCSCR	889
+#define SPRN_CSIGR	890
+#define SPRN_SPMC1	892
+#define SPRN_SPMC2	893
+
+/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+#define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+#define MMCR2_USER_MASK	0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
+#define SIER_USER_MASK	0x7fffffUL
+
+#define SPRN_PA6T_MMCR0 795
+#define   PA6T_MMCR0_EN0	0x0000000000000001UL
+#define   PA6T_MMCR0_EN1	0x0000000000000002UL
+#define   PA6T_MMCR0_EN2	0x0000000000000004UL
+#define   PA6T_MMCR0_EN3	0x0000000000000008UL
+#define   PA6T_MMCR0_EN4	0x0000000000000010UL
+#define   PA6T_MMCR0_EN5	0x0000000000000020UL
+#define   PA6T_MMCR0_SUPEN	0x0000000000000040UL
+#define   PA6T_MMCR0_PREN	0x0000000000000080UL
+#define   PA6T_MMCR0_HYPEN	0x0000000000000100UL
+#define   PA6T_MMCR0_FCM0	0x0000000000000200UL
+#define   PA6T_MMCR0_FCM1	0x0000000000000400UL
+#define   PA6T_MMCR0_INTGEN	0x0000000000000800UL
+#define   PA6T_MMCR0_INTEN0	0x0000000000001000UL
+#define   PA6T_MMCR0_INTEN1	0x0000000000002000UL
+#define   PA6T_MMCR0_INTEN2	0x0000000000004000UL
+#define   PA6T_MMCR0_INTEN3	0x0000000000008000UL
+#define   PA6T_MMCR0_INTEN4	0x0000000000010000UL
+#define   PA6T_MMCR0_INTEN5	0x0000000000020000UL
+#define   PA6T_MMCR0_DISCNT	0x0000000000040000UL
+#define   PA6T_MMCR0_UOP	0x0000000000080000UL
+#define   PA6T_MMCR0_TRG	0x0000000000100000UL
+#define   PA6T_MMCR0_TRGEN	0x0000000000200000UL
+#define   PA6T_MMCR0_TRGREG	0x0000000001600000UL
+#define   PA6T_MMCR0_SIARLOG	0x0000000002000000UL
+#define   PA6T_MMCR0_SDARLOG	0x0000000004000000UL
+#define   PA6T_MMCR0_PROEN	0x0000000008000000UL
+#define   PA6T_MMCR0_PROLOG	0x0000000010000000UL
+#define   PA6T_MMCR0_DAMEN2	0x0000000020000000UL
+#define   PA6T_MMCR0_DAMEN3	0x0000000040000000UL
+#define   PA6T_MMCR0_DAMEN4	0x0000000080000000UL
+#define   PA6T_MMCR0_DAMEN5	0x0000000100000000UL
+#define   PA6T_MMCR0_DAMSEL2	0x0000000200000000UL
+#define   PA6T_MMCR0_DAMSEL3	0x0000000400000000UL
+#define   PA6T_MMCR0_DAMSEL4	0x0000000800000000UL
+#define   PA6T_MMCR0_DAMSEL5	0x0000001000000000UL
+#define   PA6T_MMCR0_HANDDIS	0x0000002000000000UL
+#define   PA6T_MMCR0_PCTEN	0x0000004000000000UL
+#define   PA6T_MMCR0_SOCEN	0x0000008000000000UL
+#define   PA6T_MMCR0_SOCMOD	0x0000010000000000UL
+
+#define SPRN_PA6T_MMCR1 798
+#define   PA6T_MMCR1_ES2	0x00000000000000ffUL
+#define   PA6T_MMCR1_ES3	0x000000000000ff00UL
+#define   PA6T_MMCR1_ES4	0x0000000000ff0000UL
+#define   PA6T_MMCR1_ES5	0x00000000ff000000UL
+
+#define SPRN_PA6T_UPMC0 771	/* User PerfMon Counter 0 */
+#define SPRN_PA6T_UPMC1 772	/* ... */
+#define SPRN_PA6T_UPMC2 773
+#define SPRN_PA6T_UPMC3 774
+#define SPRN_PA6T_UPMC4 775
+#define SPRN_PA6T_UPMC5 776
+#define SPRN_PA6T_UMMCR0 779	/* User Monitor Mode Control Register 0 */
+#define SPRN_PA6T_SIAR	780	/* Sampled Instruction Address */
+#define SPRN_PA6T_UMMCR1 782	/* User Monitor Mode Control Register 1 */
+#define SPRN_PA6T_SIER	785	/* Sampled Instruction Event Register */
+#define SPRN_PA6T_PMC0	787
+#define SPRN_PA6T_PMC1	788
+#define SPRN_PA6T_PMC2	789
+#define SPRN_PA6T_PMC3	790
+#define SPRN_PA6T_PMC4	791
+#define SPRN_PA6T_PMC5	792
+#define SPRN_PA6T_TSR0	793	/* Timestamp Register 0 */
+#define SPRN_PA6T_TSR1	794	/* Timestamp Register 1 */
+#define SPRN_PA6T_TSR2	799	/* Timestamp Register 2 */
+#define SPRN_PA6T_TSR3	784	/* Timestamp Register 3 */
+
+#define SPRN_PA6T_IER	981	/* Icache Error Register */
+#define SPRN_PA6T_DER	982	/* Dcache Error Register */
+#define SPRN_PA6T_BER	862	/* BIU Error Address Register */
+#define SPRN_PA6T_MER	849	/* MMU Error Register */
+
+#define SPRN_PA6T_IMA0	880	/* Instruction Match Array 0 */
+#define SPRN_PA6T_IMA1	881	/* ... */
+#define SPRN_PA6T_IMA2	882
+#define SPRN_PA6T_IMA3	883
+#define SPRN_PA6T_IMA4	884
+#define SPRN_PA6T_IMA5	885
+#define SPRN_PA6T_IMA6	886
+#define SPRN_PA6T_IMA7	887
+#define SPRN_PA6T_IMA8	888
+#define SPRN_PA6T_IMA9	889
+#define SPRN_PA6T_BTCR	978	/* Breakpoint and Tagging Control Register */
+#define SPRN_PA6T_IMAAT	979	/* Instruction Match Array Action Table */
+#define SPRN_PA6T_PCCR	1019	/* Power Counter Control Register */
+#define SPRN_BKMK	1020	/* Cell Bookmark Register */
+#define SPRN_PA6T_RPCCR	1021	/* Retire PC Trace Control Register */
+
+#else /* 32-bit */
+#define SPRN_MMCR0	952	/* Monitor Mode Control Register 0 */
+#define   MMCR0_FC	0x80000000UL /* freeze counters */
+#define   MMCR0_FCS	0x40000000UL /* freeze in supervisor state */
+#define   MMCR0_FCP	0x20000000UL /* freeze in problem state */
+#define   MMCR0_FCM1	0x10000000UL /* freeze counters while MSR mark = 1 */
+#define   MMCR0_FCM0	0x08000000UL /* freeze counters while MSR mark = 0 */
+#define   MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
+#define   MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
+#define   MMCR0_TBEE	0x00400000UL /* time base exception enable */
+#define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable */
+#define   MMCR0_PMCnCE	0x00004000UL /* count enable for all but PMC 1 */
+#define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
+#define   MMCR0_PMC1SEL	0x00001fc0UL /* PMC 1 Event */
+#define   MMCR0_PMC2SEL	0x0000003fUL /* PMC 2 Event */
+
+#define SPRN_MMCR1	956
+#define   MMCR1_PMC3SEL	0xf8000000UL /* PMC 3 Event */
+#define   MMCR1_PMC4SEL	0x07c00000UL /* PMC 4 Event */
+#define   MMCR1_PMC5SEL	0x003e0000UL /* PMC 5 Event */
+#define   MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */
+#define SPRN_MMCR2	944
+#define SPRN_PMC1	953	/* Performance Counter Register 1 */
+#define SPRN_PMC2	954	/* Performance Counter Register 2 */
+#define SPRN_PMC3	957	/* Performance Counter Register 3 */
+#define SPRN_PMC4	958	/* Performance Counter Register 4 */
+#define SPRN_PMC5	945	/* Performance Counter Register 5 */
+#define SPRN_PMC6	946	/* Performance Counter Register 6 */
+
+#define SPRN_SIAR	955	/* Sampled Instruction Address Register */
+
+/* Bit definitions for MMCR0 and PMC1 / PMC2. */
+#define MMCR0_PMC1_CYCLES	(1 << 7)
+#define MMCR0_PMC1_ICACHEMISS	(5 << 7)
+#define MMCR0_PMC1_DTLB		(6 << 7)
+#define MMCR0_PMC2_DCACHEMISS	0x6
+#define MMCR0_PMC2_CYCLES	0x1
+#define MMCR0_PMC2_ITLB		0x7
+#define MMCR0_PMC2_LOADMISSTIME	0x5
+#endif
+
+/*
+ * SPRG usage:
+ *
+ * All 64-bit:
+ *	- SPRG1 stores PACA pointer except 64-bit server in
+ *        HV mode in which case it is HSPRG0
+ *
+ * 64-bit server:
+ *	- SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4)
+ *	- SPRG2 scratch for exception vectors
+ *	- SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
+ *      - HSPRG0 stores PACA in HV mode
+ *      - HSPRG1 scratch for "HV" exceptions
+ *
+ * 64-bit embedded
+ *	- SPRG0 generic exception scratch
+ *	- SPRG2 TLB exception stack
+ *	- SPRG3 critical exception scratch (user visible, sorry!)
+ *	- SPRG4 unused (user visible)
+ *	- SPRG6 TLB miss scratch (user visible, sorry!)
+ *	- SPRG7 CPU and NUMA node for VDSO getcpu (user visible)
+ *	- SPRG8 machine check exception scratch
+ *	- SPRG9 debug exception scratch
+ *
+ * All 32-bit:
+ *	- SPRG3 current thread_info pointer
+ *        (virtual on BookE, physical on others)
+ *
+ * 32-bit classic:
+ *	- SPRG0 scratch for exception vectors
+ *	- SPRG1 scratch for exception vectors
+ *	- SPRG2 indicator that we are in RTAS
+ *	- SPRG4 (603 only) pseudo TLB LRU data
+ *
+ * 32-bit 40x:
+ *	- SPRG0 scratch for exception vectors
+ *	- SPRG1 scratch for exception vectors
+ *	- SPRG2 scratch for exception vectors
+ *	- SPRG4 scratch for exception vectors (not 403)
+ *	- SPRG5 scratch for exception vectors (not 403)
+ *	- SPRG6 scratch for exception vectors (not 403)
+ *	- SPRG7 scratch for exception vectors (not 403)
+ *
+ * 32-bit 440 and FSL BookE:
+ *	- SPRG0 scratch for exception vectors
+ *	- SPRG1 scratch for exception vectors (*)
+ *	- SPRG2 scratch for crit interrupts handler
+ *	- SPRG4 scratch for exception vectors
+ *	- SPRG5 scratch for exception vectors
+ *	- SPRG6 scratch for machine check handler
+ *	- SPRG7 scratch for exception vectors
+ *	- SPRG9 scratch for debug vectors (e500 only)
+ *
+ *      Additionally, BookE separates "read" and "write"
+ *      of those registers. That allows the userspace-readable
+ *      variant to be used for reads, which can avoid a fault
+ *      with KVM-type virtualization.
+ *
+ * 32-bit 8xx:
+ *	- SPRG0 scratch for exception vectors
+ *	- SPRG1 scratch for exception vectors
+ *	- SPRG2 scratch for exception vectors
+ *
+ */
+#ifdef CONFIG_PPC64
+#define SPRN_SPRG_PACA 		SPRN_SPRG1
+#else
+#define SPRN_SPRG_THREAD 	SPRN_SPRG3
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SPRN_SPRG_SCRATCH0	SPRN_SPRG2
+#define SPRN_SPRG_HPACA		SPRN_HSPRG0
+#define SPRN_SPRG_HSCRATCH0	SPRN_HSPRG1
+#define SPRN_SPRG_VDSO_READ	SPRN_USPRG3
+#define SPRN_SPRG_VDSO_WRITE	SPRN_SPRG3
+
+#define GET_PACA(rX)					\
+	BEGIN_FTR_SECTION_NESTED(66);			\
+	mfspr	rX,SPRN_SPRG_PACA;			\
+	FTR_SECTION_ELSE_NESTED(66);			\
+	mfspr	rX,SPRN_SPRG_HPACA;			\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_PACA(rX)					\
+	BEGIN_FTR_SECTION_NESTED(66);			\
+	mtspr	SPRN_SPRG_PACA,rX;			\
+	FTR_SECTION_ELSE_NESTED(66);			\
+	mtspr	SPRN_SPRG_HPACA,rX;			\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define GET_SCRATCH0(rX)				\
+	BEGIN_FTR_SECTION_NESTED(66);			\
+	mfspr	rX,SPRN_SPRG_SCRATCH0;			\
+	FTR_SECTION_ELSE_NESTED(66);			\
+	mfspr	rX,SPRN_SPRG_HSCRATCH0;			\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_SCRATCH0(rX)				\
+	BEGIN_FTR_SECTION_NESTED(66);			\
+	mtspr	SPRN_SPRG_SCRATCH0,rX;			\
+	FTR_SECTION_ELSE_NESTED(66);			\
+	mtspr	SPRN_SPRG_HSCRATCH0,rX;			\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+#define GET_SCRATCH0(rX)	mfspr	rX,SPRN_SPRG_SCRATCH0
+#define SET_SCRATCH0(rX)	mtspr	SPRN_SPRG_SCRATCH0,rX
+
+#endif
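+
+/*
+ * Usage sketch (illustrative only): an exception prologue typically frees a
+ * GPR and then loads the PACA with these macros, along the lines of:
+ *
+ *	SET_SCRATCH0(r13)
+ *	GET_PACA(r13)
+ */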
+
+#ifdef CONFIG_PPC_BOOK3E_64
+#define SPRN_SPRG_MC_SCRATCH	SPRN_SPRG8
+#define SPRN_SPRG_CRIT_SCRATCH	SPRN_SPRG3
+#define SPRN_SPRG_DBG_SCRATCH	SPRN_SPRG9
+#define SPRN_SPRG_TLB_EXFRAME	SPRN_SPRG2
+#define SPRN_SPRG_TLB_SCRATCH	SPRN_SPRG6
+#define SPRN_SPRG_GEN_SCRATCH	SPRN_SPRG0
+#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH
+#define SPRN_SPRG_VDSO_READ	SPRN_USPRG7
+#define SPRN_SPRG_VDSO_WRITE	SPRN_SPRG7
+
+#define SET_PACA(rX)	mtspr	SPRN_SPRG_PACA,rX
+#define GET_PACA(rX)	mfspr	rX,SPRN_SPRG_PACA
+
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SPRN_SPRG_SCRATCH0	SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1	SPRN_SPRG1
+#define SPRN_SPRG_RTAS		SPRN_SPRG2
+#define SPRN_SPRG_603_LRU	SPRN_SPRG4
+#endif
+
+#ifdef CONFIG_40x
+#define SPRN_SPRG_SCRATCH0	SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1	SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2	SPRN_SPRG2
+#define SPRN_SPRG_SCRATCH3	SPRN_SPRG4
+#define SPRN_SPRG_SCRATCH4	SPRN_SPRG5
+#define SPRN_SPRG_SCRATCH5	SPRN_SPRG6
+#define SPRN_SPRG_SCRATCH6	SPRN_SPRG7
+#endif
+
+#ifdef CONFIG_BOOKE
+#define SPRN_SPRG_RSCRATCH0	SPRN_SPRG0
+#define SPRN_SPRG_WSCRATCH0	SPRN_SPRG0
+#define SPRN_SPRG_RSCRATCH1	SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH1	SPRN_SPRG1
+#define SPRN_SPRG_RSCRATCH_CRIT	SPRN_SPRG2
+#define SPRN_SPRG_WSCRATCH_CRIT	SPRN_SPRG2
+#define SPRN_SPRG_RSCRATCH2	SPRN_SPRG4R
+#define SPRN_SPRG_WSCRATCH2	SPRN_SPRG4W
+#define SPRN_SPRG_RSCRATCH3	SPRN_SPRG5R
+#define SPRN_SPRG_WSCRATCH3	SPRN_SPRG5W
+#define SPRN_SPRG_RSCRATCH_MC	SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH_MC	SPRN_SPRG1
+#define SPRN_SPRG_RSCRATCH4	SPRN_SPRG7R
+#define SPRN_SPRG_WSCRATCH4	SPRN_SPRG7W
+#ifdef CONFIG_E200
+#define SPRN_SPRG_RSCRATCH_DBG	SPRN_SPRG6R
+#define SPRN_SPRG_WSCRATCH_DBG	SPRN_SPRG6W
+#else
+#define SPRN_SPRG_RSCRATCH_DBG	SPRN_SPRG9
+#define SPRN_SPRG_WSCRATCH_DBG	SPRN_SPRG9
+#endif
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#define SPRN_SPRG_SCRATCH0	SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1	SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2	SPRN_SPRG2
+#endif
+
+/*
+ * An mtfsf instruction with the L bit set. On CPUs that support this, the
+ * full 64 bits of the FPSCR are restored; on other CPUs the L bit is ignored.
+ *
+ * Until binutils gets the new form of mtfsf, hardwire the instruction.
+ */
+#ifdef CONFIG_PPC64
+#define MTFSF_L(REG) \
+	.long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25))
+#else
+#define MTFSF_L(REG)	mtfsf	0xff, (REG)
+#endif
+
+/* Processor Version Register (PVR) field extraction */
+
+#define PVR_VER(pvr)	(((pvr) >>  16) & 0xFFFF)	/* Version field */
+#define PVR_REV(pvr)	(((pvr) >>   0) & 0xFFFF)	/* Revision field */
+
+#define pvr_version_is(pvr)	(PVR_VER(mfspr(SPRN_PVR)) == (pvr))
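+
+/*
+ * Usage sketch (illustrative only): decoding the PVR read from the SPR with
+ * the extraction macros above:
+ *
+ *	unsigned int pvr = mfspr(SPRN_PVR);
+ *
+ *	if (PVR_VER(pvr) == PVR_POWER9)
+ *		... POWER9-specific setup; the revision is PVR_REV(pvr) ...
+ */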
+
+/*
+ * IBM has further subdivided the standard PowerPC 16-bit version and
+ * revision subfields of the PVR for the PowerPC 403s into the following:
+ */
+
+#define PVR_FAM(pvr)	(((pvr) >> 20) & 0xFFF)	/* Family field */
+#define PVR_MEM(pvr)	(((pvr) >> 16) & 0xF)	/* Member field */
+#define PVR_CORE(pvr)	(((pvr) >> 12) & 0xF)	/* Core field */
+#define PVR_CFG(pvr)	(((pvr) >>  8) & 0xF)	/* Configuration field */
+#define PVR_MAJ(pvr)	(((pvr) >>  4) & 0xF)	/* Major revision field */
+#define PVR_MIN(pvr)	(((pvr) >>  0) & 0xF)	/* Minor revision field */
+
+/* Processor Version Numbers */
+
+#define PVR_403GA	0x00200000
+#define PVR_403GB	0x00200100
+#define PVR_403GC	0x00200200
+#define PVR_403GCX	0x00201400
+#define PVR_405GP	0x40110000
+#define PVR_476		0x11a52000
+#define PVR_476FPE	0x7ff50000
+#define PVR_STB03XXX	0x40310000
+#define PVR_NP405H	0x41410000
+#define PVR_NP405L	0x41610000
+#define PVR_601		0x00010000
+#define PVR_602		0x00050000
+#define PVR_603		0x00030000
+#define PVR_603e	0x00060000
+#define PVR_603ev	0x00070000
+#define PVR_603r	0x00071000
+#define PVR_604		0x00040000
+#define PVR_604e	0x00090000
+#define PVR_604r	0x000A0000
+#define PVR_620		0x00140000
+#define PVR_740		0x00080000
+#define PVR_750		PVR_740
+#define PVR_740P	0x10080000
+#define PVR_750P	PVR_740P
+#define PVR_7400	0x000C0000
+#define PVR_7410	0x800C0000
+#define PVR_7450	0x80000000
+#define PVR_8540	0x80200000
+#define PVR_8560	0x80200000
+#define PVR_VER_E500V1	0x8020
+#define PVR_VER_E500V2	0x8021
+#define PVR_VER_E500MC	0x8023
+#define PVR_VER_E5500	0x8024
+#define PVR_VER_E6500	0x8040
+
+/*
+ * For the 8xx processors, all of them report the same PVR family for
+ * the PowerPC core. The various versions of these processors must be
+ * differentiated by the version number in the Communication Processor
+ * Module (CPM).
+ */
+#define PVR_8xx		0x00500000
+
+#define PVR_8240	0x00810100
+#define PVR_8245	0x80811014
+#define PVR_8260	PVR_8240
+
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS	0x00052000
+
+/* 64-bit processors */
+#define PVR_NORTHSTAR	0x0033
+#define PVR_PULSAR	0x0034
+#define PVR_POWER4	0x0035
+#define PVR_ICESTAR	0x0036
+#define PVR_SSTAR	0x0037
+#define PVR_POWER4p	0x0038
+#define PVR_970		0x0039
+#define PVR_POWER5	0x003A
+#define PVR_POWER5p	0x003B
+#define PVR_970FX	0x003C
+#define PVR_POWER6	0x003E
+#define PVR_POWER7	0x003F
+#define PVR_630		0x0040
+#define PVR_630p	0x0041
+#define PVR_970MP	0x0044
+#define PVR_970GX	0x0045
+#define PVR_POWER7p	0x004A
+#define PVR_POWER8E	0x004B
+#define PVR_POWER8NVL	0x004C
+#define PVR_POWER8	0x004D
+#define PVR_POWER9	0x004E
+#define PVR_BE		0x0070
+#define PVR_PA6T	0x0090
+
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204	0x0f000001
+#define PVR_ARCH_205	0x0f000002
+#define PVR_ARCH_206	0x0f000003
+#define PVR_ARCH_206p	0x0f100003
+#define PVR_ARCH_207	0x0f000004
+#define PVR_ARCH_300	0x0f000005
+
+/* Macros for setting and retrieving special purpose registers */
+#ifndef __ASSEMBLY__
+#define mfmsr()		({unsigned long rval; \
+			asm volatile("mfmsr %0" : "=r" (rval) : \
+						: "memory"); rval;})
+#ifdef CONFIG_PPC_BOOK3S_64
+#define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
+				     : : "r" (v) : "memory")
+#define mtmsr(v)	__mtmsrd((v), 0)
+#define __MTMSR		"mtmsrd"
+#else
+#define mtmsr(v)	asm volatile("mtmsr %0" : \
+				     : "r" ((unsigned long)(v)) \
+				     : "memory")
+#define __MTMSR		"mtmsr"
+#endif
+
+static inline void mtmsr_isync(unsigned long val)
+{
+	asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+			"r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
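+
+/*
+ * Usage sketch (illustrative only): the classic read-modify-write pattern
+ * with these accessors, e.g. masking external interrupts:
+ *
+ *	unsigned long msr = mfmsr();
+ *
+ *	mtmsr(msr & ~MSR_EE);
+ */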
+
+#define mfspr(rn)	({unsigned long rval; \
+			asm volatile("mfspr %0," __stringify(rn) \
+				: "=r" (rval)); rval;})
+#ifndef mtspr
+#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : \
+				     : "r" ((unsigned long)(v)) \
+				     : "memory")
+#endif
+#define wrtspr(rn)	asm volatile("mtspr " __stringify(rn) ",0" : \
+				     : : "memory")
+
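+/*
+ * Usage sketch (illustrative only): the SPR number given to mfspr()/mtspr()
+ * must be a compile-time constant, because it is encoded directly into the
+ * instruction; e.g. (value is a hypothetical local variable):
+ *
+ *	mtspr(SPRN_SPRG0, value);
+ */
+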
+extern unsigned long msr_check_and_set(unsigned long bits);
+extern bool strict_msr_control;
+extern void __msr_check_and_clear(unsigned long bits);
+static inline void msr_check_and_clear(unsigned long bits)
+{
+	if (strict_msr_control)
+		__msr_check_and_clear(bits);
+}
+
+#ifdef __powerpc64__
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+#define mftb()		({unsigned long rval;				\
+			asm volatile(					\
+				"90:	mfspr %0, %2;\n"		\
+				"97:	cmpwi %0,0;\n"			\
+				"	beq- 90b;\n"			\
+				"99:\n"					\
+				".section __ftr_fixup,\"a\"\n"		\
+				".align 3\n"				\
+				"98:\n"					\
+				"	.8byte %1\n"			\
+				"	.8byte %1\n"			\
+				"	.8byte 97b-98b\n"		\
+				"	.8byte 99b-98b\n"		\
+				"	.8byte 0\n"			\
+				"	.8byte 0\n"			\
+				".previous"				\
+			: "=r" (rval) \
+			: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
+			rval;})
+#else
+#define mftb()		({unsigned long rval;	\
+			asm volatile("mfspr %0, %1" : \
+				     "=r" (rval) : "i" (SPRN_TBRL)); rval;})
+#endif /* !CONFIG_PPC_CELL */
+
+#else /* __powerpc64__ */
+
+#if defined(CONFIG_PPC_8xx)
+#define mftbl()		({unsigned long rval;	\
+			asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#define mftbu()		({unsigned long rval;	\
+			asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
+#define mftbl()		({unsigned long rval;	\
+			asm volatile("mfspr %0, %1" : "=r" (rval) : \
+				"i" (SPRN_TBRL)); rval;})
+#define mftbu()		({unsigned long rval;	\
+			asm volatile("mfspr %0, %1" : "=r" (rval) : \
+				"i" (SPRN_TBRU)); rval;})
+#endif
+#define mftb()		mftbl()
+#endif /* !__powerpc64__ */
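+
+/*
+ * Usage sketch (illustrative only): on 32-bit, a consistent 64-bit timebase
+ * value is obtained by re-reading the upper half until it is stable:
+ *
+ *	u32 hi, lo;
+ *
+ *	do {
+ *		hi = mftbu();
+ *		lo = mftbl();
+ *	} while (mftbu() != hi);
+ */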
+
+#define mttbl(v)	asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v)	asm volatile("mttbu %0":: "r"(v))
+
+#ifdef CONFIG_PPC32
+#define mfsrin(v)	({unsigned int rval; \
+			asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
+					rval;})
+#endif
+
+#define proc_trap()	asm volatile("trap")
+
+extern unsigned long current_stack_pointer(void);
+
+extern unsigned long scom970_read(unsigned int address);
+extern void scom970_write(unsigned int address, unsigned long value);
+
+struct pt_regs;
+
+extern void ppc_save_regs(struct pt_regs *regs);
+
+static inline void update_power8_hid0(unsigned long hid0)
+{
+	/*
+	 *  The HID0 update on Power8 should at the very least be
+	 *  preceded by a SYNC instruction and followed by an ISYNC
+	 *  instruction.
+	 */
+	asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
new file mode 100644
index 0000000..7192eec
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains register definitions common to PowerPC 8xx CPUs.
+ */
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
+
+#include <asm/mmu.h>
+
+/* Cache control on the MPC8xx is provided through some additional
+ * special purpose registers.
+ */
+#define SPRN_IC_CST	560	/* Instruction cache control/status */
+#define SPRN_IC_ADR	561	/* Address needed for some commands */
+#define SPRN_IC_DAT	562	/* Read-only data register */
+#define SPRN_DC_CST	568	/* Data cache control/status */
+#define SPRN_DC_ADR	569	/* Address needed for some commands */
+#define SPRN_DC_DAT	570	/* Read-only data register */
+
+/* Misc Debug */
+#define SPRN_DPDR	630
+#define SPRN_MI_CAM	816
+#define SPRN_MI_RAM0	817
+#define SPRN_MI_RAM1	818
+#define SPRN_MD_CAM	824
+#define SPRN_MD_RAM0	825
+#define SPRN_MD_RAM1	826
+
+/* Special MSR manipulation registers */
+#define SPRN_EIE	80	/* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID	81	/* External interrupt disable (EE=0, RI=1) */
+#define SPRN_NRI	82	/* Non recoverable interrupt (EE=0, RI=0) */
+
+/* Debug registers */
+#define SPRN_CMPA	144
+#define SPRN_COUNTA	150
+#define SPRN_CMPE	152
+#define SPRN_CMPF	153
+#define SPRN_LCTRL1	156
+#define SPRN_LCTRL2	157
+#define SPRN_ICTRL	158
+#define SPRN_BAR	159
+
+/* Commands.  Only the first few are available to the instruction cache.
+*/
+#define	IDC_ENABLE	0x02000000	/* Cache enable */
+#define IDC_DISABLE	0x04000000	/* Cache disable */
+#define IDC_LDLCK	0x06000000	/* Load and lock */
+#define IDC_UNLINE	0x08000000	/* Unlock line */
+#define IDC_UNALL	0x0a000000	/* Unlock all */
+#define IDC_INVALL	0x0c000000	/* Invalidate all */
+
+#define DC_FLINE	0x0e000000	/* Flush data cache line */
+#define DC_SFWT		0x01000000	/* Set forced writethrough mode */
+#define DC_CFWT		0x03000000	/* Clear forced writethrough mode */
+#define DC_SLES		0x05000000	/* Set little endian swap mode */
+#define DC_CLES		0x07000000	/* Clear little endian swap mode */
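+
+/*
+ * Usage sketch (illustrative only): commands are issued by writing the
+ * command code into the control/status SPR, e.g. invalidating the whole
+ * data cache:
+ *
+ *	mtspr(SPRN_DC_CST, IDC_INVALL);
+ */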
+
+/* Status.
+*/
+#define IDC_ENABLED	0x80000000	/* Cache is enabled */
+#define IDC_CERR1	0x00200000	/* Cache error 1 */
+#define IDC_CERR2	0x00100000	/* Cache error 2 */
+#define IDC_CERR3	0x00080000	/* Cache error 3 */
+
+#define DC_DFWT		0x40000000	/* Data cache is forced write through */
+#define DC_LES		0x20000000	/* Caches are little endian mode */
+
+#endif /* _ASM_POWERPC_REG_8xx_H */
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
new file mode 100644
index 0000000..74c2c57
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -0,0 +1,158 @@
+/*
+ *  Register definitions specific to the A2 core
+ *
+ *  Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_REG_A2_H__
+#define __ASM_POWERPC_REG_A2_H__
+
+#include <asm/asm-const.h>
+
+#define SPRN_TENSR	0x1b5
+#define SPRN_TENS	0x1b6	/* Thread ENable Set */
+#define SPRN_TENC	0x1b7	/* Thread ENable Clear */
+
+#define SPRN_A2_CCR0	0x3f0	/* Core Configuration Register 0 */
+#define SPRN_A2_CCR1	0x3f1	/* Core Configuration Register 1 */
+#define SPRN_A2_CCR2	0x3f2	/* Core Configuration Register 2 */
+#define SPRN_MMUCR0	0x3fc	/* MMU Control Register 0 */
+#define SPRN_MMUCR1	0x3fd	/* MMU Control Register 1 */
+#define SPRN_MMUCR2	0x3fe	/* MMU Control Register 2 */
+#define SPRN_MMUCR3	0x3ff	/* MMU Control Register 3 */
+
+#define SPRN_IAR	0x372
+
+#define SPRN_IUCR0	0x3f3
+#define IUCR0_ICBI_ACK	0x1000
+
+#define SPRN_XUCR0	0x3f6	/* Execution Unit Config Register 0 */
+
+#define A2_IERAT_SIZE	16
+#define A2_DERAT_SIZE	32
+
+/* A2 MMUCR0 bits */
+#define MMUCR0_ECL	0x80000000	/* Extended Class for TLB fills */
+#define MMUCR0_TID_NZ	0x40000000	/* TID is non-zero */
+#define MMUCR0_TS	0x10000000	/* Translation space for TLB fills */
+#define MMUCR0_TGS	0x20000000	/* Guest space for TLB fills */
+#define MMUCR0_TLBSEL	0x0c000000	/* TLB or ERAT target for TLB fills */
+#define MMUCR0_TLBSEL_U	0x00000000	/*  TLBSEL = UTLB */
+#define MMUCR0_TLBSEL_I	0x08000000	/*  TLBSEL = I-ERAT */
+#define MMUCR0_TLBSEL_D	0x0c000000	/*  TLBSEL = D-ERAT */
+#define MMUCR0_LOCKSRSH	0x02000000	/* Use TLB lock on tlbsx. */
+#define MMUCR0_TID_MASK	0x000000ff	/* TID field */
+
+/* A2 MMUCR1 bits */
+#define MMUCR1_IRRE		0x80000000	/* I-ERAT round robin enable */
+#define MMUCR1_DRRE		0x40000000	/* D-ERAT round robin enable */
+#define MMUCR1_REE		0x20000000	/* Reference Exception Enable */
+#define MMUCR1_CEE		0x10000000	/* Change exception enable */
+#define MMUCR1_CSINV_ALL	0x00000000	/* Inval ERAT on all CS evts */
+#define MMUCR1_CSINV_NISYNC	0x04000000	/* Inval ERAT on all CS evts but isync */
+#define MMUCR1_CSINV_NEVER	0x0c000000	/* Don't inval ERAT on CS */
+#define MMUCR1_ICTID		0x00080000	/* IERAT class field as TID */
+#define MMUCR1_ITTID		0x00040000	/* IERAT thdid field as TID */
+#define MMUCR1_DCTID		0x00020000	/* DERAT class field as TID */
+#define MMUCR1_DTTID		0x00010000	/* DERAT thdid field as TID */
+#define MMUCR1_DCCD		0x00008000	/* DERAT class ignore */
+#define MMUCR1_TLBWE_BINV	0x00004000	/* back invalidate on tlbwe */
+
+/* A2 MMUCR2 bits */
+#define MMUCR2_PSSEL_SHIFT	4
+
+/* A2 MMUCR3 bits */
+#define MMUCR3_THID		0x0000000f	/* Thread ID */
+
+/* *** ERAT TLB bits definitions */
+#define TLB0_EPN_MASK		ASM_CONST(0xfffffffffffff000)
+#define TLB0_CLASS_MASK		ASM_CONST(0x0000000000000c00)
+#define TLB0_CLASS_00		ASM_CONST(0x0000000000000000)
+#define TLB0_CLASS_01		ASM_CONST(0x0000000000000400)
+#define TLB0_CLASS_10		ASM_CONST(0x0000000000000800)
+#define TLB0_CLASS_11		ASM_CONST(0x0000000000000c00)
+#define TLB0_V			ASM_CONST(0x0000000000000200)
+#define TLB0_X			ASM_CONST(0x0000000000000100)
+#define TLB0_SIZE_MASK		ASM_CONST(0x00000000000000f0)
+#define TLB0_SIZE_4K		ASM_CONST(0x0000000000000010)
+#define TLB0_SIZE_64K		ASM_CONST(0x0000000000000030)
+#define TLB0_SIZE_1M		ASM_CONST(0x0000000000000050)
+#define TLB0_SIZE_16M		ASM_CONST(0x0000000000000070)
+#define TLB0_SIZE_1G		ASM_CONST(0x00000000000000a0)
+#define TLB0_THDID_MASK		ASM_CONST(0x000000000000000f)
+#define TLB0_THDID_0		ASM_CONST(0x0000000000000001)
+#define TLB0_THDID_1		ASM_CONST(0x0000000000000002)
+#define TLB0_THDID_2		ASM_CONST(0x0000000000000004)
+#define TLB0_THDID_3		ASM_CONST(0x0000000000000008)
+#define TLB0_THDID_ALL		ASM_CONST(0x000000000000000f)
+
+#define TLB1_RESVATTR		ASM_CONST(0x00f0000000000000)
+#define TLB1_U0			ASM_CONST(0x0008000000000000)
+#define TLB1_U1			ASM_CONST(0x0004000000000000)
+#define TLB1_U2			ASM_CONST(0x0002000000000000)
+#define TLB1_U3			ASM_CONST(0x0001000000000000)
+#define TLB1_R			ASM_CONST(0x0000800000000000)
+#define TLB1_C			ASM_CONST(0x0000400000000000)
+#define TLB1_RPN_MASK		ASM_CONST(0x000003fffffff000)
+#define TLB1_W			ASM_CONST(0x0000000000000800)
+#define TLB1_I			ASM_CONST(0x0000000000000400)
+#define TLB1_M			ASM_CONST(0x0000000000000200)
+#define TLB1_G			ASM_CONST(0x0000000000000100)
+#define TLB1_E			ASM_CONST(0x0000000000000080)
+#define TLB1_VF			ASM_CONST(0x0000000000000040)
+#define TLB1_UX			ASM_CONST(0x0000000000000020)
+#define TLB1_SX			ASM_CONST(0x0000000000000010)
+#define TLB1_UW			ASM_CONST(0x0000000000000008)
+#define TLB1_SW			ASM_CONST(0x0000000000000004)
+#define TLB1_UR			ASM_CONST(0x0000000000000002)
+#define TLB1_SR			ASM_CONST(0x0000000000000001)
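+
+/*
+ * Usage sketch (illustrative only, assuming ea and pa hold the effective and
+ * real addresses): a valid 4K supervisor read/write entry could be composed
+ * from these bits roughly as:
+ *
+ *	u64 w0 = (ea & TLB0_EPN_MASK) | TLB0_V | TLB0_SIZE_4K | TLB0_THDID_ALL;
+ *	u64 w1 = (pa & TLB1_RPN_MASK) | TLB1_SR | TLB1_SW;
+ */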
+
+/* A2 erativax attributes definitions */
+#define ERATIVAX_RS_IS_ALL		0x000
+#define ERATIVAX_RS_IS_TID		0x040
+#define ERATIVAX_RS_IS_CLASS		0x080
+#define ERATIVAX_RS_IS_FULLMATCH	0x0c0
+#define ERATIVAX_CLASS_00		0x000
+#define ERATIVAX_CLASS_01		0x010
+#define ERATIVAX_CLASS_10		0x020
+#define ERATIVAX_CLASS_11		0x030
+#define ERATIVAX_PSIZE_4K		(TLB_PSIZE_4K >> 1)
+#define ERATIVAX_PSIZE_64K		(TLB_PSIZE_64K >> 1)
+#define ERATIVAX_PSIZE_1M		(TLB_PSIZE_1M >> 1)
+#define ERATIVAX_PSIZE_16M		(TLB_PSIZE_16M >> 1)
+#define ERATIVAX_PSIZE_1G		(TLB_PSIZE_1G >> 1)
+
+/* A2 eratilx attributes definitions */
+#define ERATILX_T_ALL			0
+#define ERATILX_T_TID			1
+#define ERATILX_T_TGS			2
+#define ERATILX_T_FULLMATCH		3
+#define ERATILX_T_CLASS0		4
+#define ERATILX_T_CLASS1		5
+#define ERATILX_T_CLASS2		6
+#define ERATILX_T_CLASS3		7
+
+/* XUCR0 bits */
+#define XUCR0_TRACE_UM_T0		0x40000000	/* Thread 0 */
+#define XUCR0_TRACE_UM_T1		0x20000000	/* Thread 1 */
+#define XUCR0_TRACE_UM_T2		0x10000000	/* Thread 2 */
+#define XUCR0_TRACE_UM_T3		0x08000000	/* Thread 3 */
+
+/* A2 CCR0 register */
+#define A2_CCR0_PME_DISABLED		0x00000000
+#define A2_CCR0_PME_SLEEP		0x40000000
+#define A2_CCR0_PME_RVW			0x80000000
+#define A2_CCR0_PME_DISABLED2		0xc0000000
+
+/* A2 CCR2 register */
+#define A2_CCR2_ERAT_ONLY_MODE		0x00000001
+#define A2_CCR2_ENABLE_ICSWX		0x00000002
+#define A2_CCR2_ENABLE_PC		0x20000000
+#define A2_CCR2_ENABLE_TRACE		0x40000000
+
+#endif /* __ASM_POWERPC_REG_A2_H__ */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
new file mode 100644
index 0000000..eb2a33d
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -0,0 +1,767 @@
+/*
+ * Contains register definitions common to the Book E PowerPC
+ * specification.  Notice that while the IBM-40x series of CPUs
+ * are not true Book E PowerPCs, they borrowed a number of features
+ * before Book E was finalized, and are included here as well.  Unfortunately,
+ * they sometimes used different locations than true Book E CPUs did.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_BOOKE_H__
+#define __ASM_POWERPC_REG_BOOKE_H__
+
+#include <asm/ppc-opcode.h>
+
+/* Machine State Register (MSR) Fields */
+#define MSR_GS_LG	28	/* Guest state */
+#define MSR_UCLE_LG	26	/* User-mode cache lock enable */
+#define MSR_SPE_LG	25	/* Enable SPE */
+#define MSR_DWE_LG	10	/* Debug Wait Enable */
+#define MSR_UBLE_LG	10	/* BTB lock enable (e500) */
+#define MSR_IS_LG	MSR_IR_LG /* Instruction Space */
+#define MSR_DS_LG	MSR_DR_LG /* Data Space */
+#define MSR_PMM_LG	2	/* Performance monitor mark bit */
+#define MSR_CM_LG	31	/* Computation Mode (0=32-bit, 1=64-bit) */
+
+#define MSR_GS		__MASK(MSR_GS_LG)
+#define MSR_UCLE	__MASK(MSR_UCLE_LG)
+#define MSR_SPE		__MASK(MSR_SPE_LG)
+#define MSR_DWE		__MASK(MSR_DWE_LG)
+#define MSR_UBLE	__MASK(MSR_UBLE_LG)
+#define MSR_IS		__MASK(MSR_IS_LG)
+#define MSR_DS		__MASK(MSR_DS_LG)
+#define MSR_PMM		__MASK(MSR_PMM_LG)
+#define MSR_CM		__MASK(MSR_CM_LG)
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+#define MSR_64BIT	MSR_CM
+
+#define MSR_		(MSR_ME | MSR_CE)
+#define MSR_KERNEL	(MSR_ | MSR_64BIT)
+#define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64	(MSR_USER32 | MSR_64BIT)
+#elif defined(CONFIG_40x)
+#define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
+#define MSR_USER	(MSR_KERNEL|MSR_PR|MSR_EE)
+#else
+#define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_CE)
+#define MSR_USER	(MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
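+
+/*
+ * Usage sketch (illustrative only): these composites are typically loaded
+ * into SRR1 before an rfi that switches context, roughly:
+ *
+ *	mtspr(SPRN_SRR1, MSR_KERNEL);
+ */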
+
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_DECAR	0x036	/* Decrementer Auto Reload Register */
+#define SPRN_IVPR	0x03F	/* Interrupt Vector Prefix Register */
+#define SPRN_USPRG0	0x100	/* User Special Purpose Register General 0 */
+#define SPRN_SPRG3R	0x103	/* Special Purpose Register General 3 Read */
+#define SPRN_SPRG4R	0x104	/* Special Purpose Register General 4 Read */
+#define SPRN_SPRG5R	0x105	/* Special Purpose Register General 5 Read */
+#define SPRN_SPRG6R	0x106	/* Special Purpose Register General 6 Read */
+#define SPRN_SPRG7R	0x107	/* Special Purpose Register General 7 Read */
+#define SPRN_SPRG4W	0x114	/* Special Purpose Register General 4 Write */
+#define SPRN_SPRG5W	0x115	/* Special Purpose Register General 5 Write */
+#define SPRN_SPRG6W	0x116	/* Special Purpose Register General 6 Write */
+#define SPRN_SPRG7W	0x117	/* Special Purpose Register General 7 Write */
+#define SPRN_EPCR	0x133	/* Embedded Processor Control Register */
+#define SPRN_DBCR2	0x136	/* Debug Control Register 2 */
+#define SPRN_DBCR4	0x233	/* Debug Control Register 4 */
+#define SPRN_MSRP	0x137	/* MSR Protect Register */
+#define SPRN_IAC3	0x13A	/* Instruction Address Compare 3 */
+#define SPRN_IAC4	0x13B	/* Instruction Address Compare 4 */
+#define SPRN_DVC1	0x13E	/* Data Value Compare Register 1 */
+#define SPRN_DVC2	0x13F	/* Data Value Compare Register 2 */
+#define SPRN_LPID	0x152	/* Logical Partition ID */
+#define SPRN_MAS8	0x155	/* MMU Assist Register 8 */
+#define SPRN_TLB0PS	0x158	/* TLB 0 Page Size Register */
+#define SPRN_TLB1PS	0x159	/* TLB 1 Page Size Register */
+#define SPRN_MAS5_MAS6	0x15c	/* MMU Assist Register 5 || 6 */
+#define SPRN_MAS8_MAS1	0x15d	/* MMU Assist Register 8 || 1 */
+#define SPRN_EPTCFG	0x15e	/* Embedded Page Table Config */
+#define SPRN_GSPRG0	0x170	/* Guest SPRG0 */
+#define SPRN_GSPRG1	0x171	/* Guest SPRG1 */
+#define SPRN_GSPRG2	0x172	/* Guest SPRG2 */
+#define SPRN_GSPRG3	0x173	/* Guest SPRG3 */
+#define SPRN_MAS7_MAS3	0x174	/* MMU Assist Register 7 || 3 */
+#define SPRN_MAS0_MAS1	0x175	/* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0	0x17A	/* Guest SRR0 */
+#define SPRN_GSRR1	0x17B	/* Guest SRR1 */
+#define SPRN_GEPR	0x17C	/* Guest EPR */
+#define SPRN_GDEAR	0x17D	/* Guest DEAR */
+#define SPRN_GPIR	0x17E	/* Guest PIR */
+#define SPRN_GESR	0x17F	/* Guest Exception Syndrome Register */
+#define SPRN_IVOR0	0x190	/* Interrupt Vector Offset Register 0 */
+#define SPRN_IVOR1	0x191	/* Interrupt Vector Offset Register 1 */
+#define SPRN_IVOR2	0x192	/* Interrupt Vector Offset Register 2 */
+#define SPRN_IVOR3	0x193	/* Interrupt Vector Offset Register 3 */
+#define SPRN_IVOR4	0x194	/* Interrupt Vector Offset Register 4 */
+#define SPRN_IVOR5	0x195	/* Interrupt Vector Offset Register 5 */
+#define SPRN_IVOR6	0x196	/* Interrupt Vector Offset Register 6 */
+#define SPRN_IVOR7	0x197	/* Interrupt Vector Offset Register 7 */
+#define SPRN_IVOR8	0x198	/* Interrupt Vector Offset Register 8 */
+#define SPRN_IVOR9	0x199	/* Interrupt Vector Offset Register 9 */
+#define SPRN_IVOR10	0x19A	/* Interrupt Vector Offset Register 10 */
+#define SPRN_IVOR11	0x19B	/* Interrupt Vector Offset Register 11 */
+#define SPRN_IVOR12	0x19C	/* Interrupt Vector Offset Register 12 */
+#define SPRN_IVOR13	0x19D	/* Interrupt Vector Offset Register 13 */
+#define SPRN_IVOR14	0x19E	/* Interrupt Vector Offset Register 14 */
+#define SPRN_IVOR15	0x19F	/* Interrupt Vector Offset Register 15 */
+#define SPRN_IVOR38	0x1B0	/* Interrupt Vector Offset Register 38 */
+#define SPRN_IVOR39	0x1B1	/* Interrupt Vector Offset Register 39 */
+#define SPRN_IVOR40	0x1B2	/* Interrupt Vector Offset Register 40 */
+#define SPRN_IVOR41	0x1B3	/* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42	0x1B4	/* Interrupt Vector Offset Register 42 */
+#define SPRN_GIVOR2	0x1B8	/* Guest IVOR2 */
+#define SPRN_GIVOR3	0x1B9	/* Guest IVOR3 */
+#define SPRN_GIVOR4	0x1BA	/* Guest IVOR4 */
+#define SPRN_GIVOR8	0x1BB	/* Guest IVOR8 */
+#define SPRN_GIVOR13	0x1BC	/* Guest IVOR13 */
+#define SPRN_GIVOR14	0x1BD	/* Guest IVOR14 */
+#define SPRN_GIVPR	0x1BF	/* Guest IVPR */
+#define SPRN_SPEFSCR	0x200	/* SPE & Embedded FP Status & Control */
+#define SPRN_BBEAR	0x201	/* Branch Buffer Entry Address Register */
+#define SPRN_BBTAR	0x202	/* Branch Buffer Target Address Register */
+#define SPRN_L1CFG0	0x203	/* L1 Cache Configure Register 0 */
+#define SPRN_L1CFG1	0x204	/* L1 Cache Configure Register 1 */
+#define SPRN_ATB	0x20E	/* Alternate Time Base */
+#define SPRN_ATBL	0x20E	/* Alternate Time Base Lower */
+#define SPRN_ATBU	0x20F	/* Alternate Time Base Upper */
+#define SPRN_IVOR32	0x210	/* Interrupt Vector Offset Register 32 */
+#define SPRN_IVOR33	0x211	/* Interrupt Vector Offset Register 33 */
+#define SPRN_IVOR34	0x212	/* Interrupt Vector Offset Register 34 */
+#define SPRN_IVOR35	0x213	/* Interrupt Vector Offset Register 35 */
+#define SPRN_IVOR36	0x214	/* Interrupt Vector Offset Register 36 */
+#define SPRN_IVOR37	0x215	/* Interrupt Vector Offset Register 37 */
+#define SPRN_MCARU	0x239	/* Machine Check Address Register Upper */
+#define SPRN_MCSRR0	0x23A	/* Machine Check Save and Restore Register 0 */
+#define SPRN_MCSRR1	0x23B	/* Machine Check Save and Restore Register 1 */
+#define SPRN_MCSR	0x23C	/* Machine Check Status Register */
+#define SPRN_MCAR	0x23D	/* Machine Check Address Register */
+#define SPRN_DSRR0	0x23E	/* Debug Save and Restore Register 0 */
+#define SPRN_DSRR1	0x23F	/* Debug Save and Restore Register 1 */
+#define SPRN_SPRG8	0x25C	/* Special Purpose Register General 8 */
+#define SPRN_SPRG9	0x25D	/* Special Purpose Register General 9 */
+#define SPRN_L1CSR2	0x25E	/* L1 Cache Control and Status Register 2 */
+#define SPRN_MAS0	0x270	/* MMU Assist Register 0 */
+#define SPRN_MAS1	0x271	/* MMU Assist Register 1 */
+#define SPRN_MAS2	0x272	/* MMU Assist Register 2 */
+#define SPRN_MAS3	0x273	/* MMU Assist Register 3 */
+#define SPRN_MAS4	0x274	/* MMU Assist Register 4 */
+#define SPRN_MAS5	0x153	/* MMU Assist Register 5 */
+#define SPRN_MAS6	0x276	/* MMU Assist Register 6 */
+#define SPRN_PID1	0x279	/* Process ID Register 1 */
+#define SPRN_PID2	0x27A	/* Process ID Register 2 */
+#define SPRN_TLB0CFG	0x2B0	/* TLB 0 Config Register */
+#define SPRN_TLB1CFG	0x2B1	/* TLB 1 Config Register */
+#define SPRN_TLB2CFG	0x2B2	/* TLB 2 Config Register */
+#define SPRN_TLB3CFG	0x2B3	/* TLB 3 Config Register */
+#define SPRN_EPR	0x2BE	/* External Proxy Register */
+#define SPRN_CCR1	0x378	/* Core Configuration Register 1 */
+#define SPRN_ZPR	0x3B0	/* Zone Protection Register (40x) */
+#define SPRN_MAS7	0x3B0	/* MMU Assist Register 7 */
+#define SPRN_MMUCR	0x3B2	/* MMU Control Register */
+#define SPRN_CCR0	0x3B3	/* Core Configuration Register 0 */
+#define SPRN_EPLC	0x3B3	/* External Process ID Load Context */
+#define SPRN_EPSC	0x3B4	/* External Process ID Store Context */
+#define SPRN_SGR	0x3B9	/* Storage Guarded Register */
+#define SPRN_DCWR	0x3BA	/* Data Cache Write-thru Register */
+#define SPRN_SLER	0x3BB	/* Little-endian real mode */
+#define SPRN_SU0R	0x3BC	/* "User 0" real mode (40x) */
+#define SPRN_DCMP	0x3D1	/* Data TLB Compare Register */
+#define SPRN_ICDBDR	0x3D3	/* Instruction Cache Debug Data Register */
+#define SPRN_EVPR	0x3D6	/* Exception Vector Prefix Register */
+#define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
+#define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
+#define SPRN_MMUCFG	0x3F7	/* MMU Configuration Register */
+#define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
+#define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
+#define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
+#define SPRN_L2CSR1	0x3FA	/* L2 Data Cache Control and Status Register 1 */
+#define SPRN_DCCR	0x3FA	/* Data Cache Cacheability Register */
+#define SPRN_ICCR	0x3FB	/* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0	0x3FB	/* Power management control register 0 */
+#define SPRN_SVR	0x3FF	/* System Version Register */
+
+/*
+ * SPRs which have conflicting definitions on true Book E versus classic,
+ * or IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define SPRN_CSRR0	0x03A	/* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1	0x03B	/* Critical Save and Restore Register 1 */
+#define SPRN_DEAR	0x03D	/* Data Error Address Register */
+#define SPRN_ESR	0x03E	/* Exception Syndrome Register */
+#define SPRN_PIR	0x11E	/* Processor Identification Register */
+#define SPRN_DBSR	0x130	/* Debug Status Register */
+#define SPRN_DBCR0	0x134	/* Debug Control Register 0 */
+#define SPRN_DBCR1	0x135	/* Debug Control Register 1 */
+#define SPRN_IAC1	0x138	/* Instruction Address Compare 1 */
+#define SPRN_IAC2	0x139	/* Instruction Address Compare 2 */
+#define SPRN_DAC1	0x13C	/* Data Address Compare 1 */
+#define SPRN_DAC2	0x13D	/* Data Address Compare 2 */
+#define SPRN_TSR	0x150	/* Timer Status Register */
+#define SPRN_TCR	0x154	/* Timer Control Register */
+#endif /* Book E */
+#ifdef CONFIG_40x
+#define SPRN_DBCR1	0x3BD	/* Debug Control Register 1 */
+#define SPRN_ESR	0x3D4	/* Exception Syndrome Register */
+#define SPRN_DEAR	0x3D5	/* Data Error Address Register */
+#define SPRN_TSR	0x3D8	/* Timer Status Register */
+#define SPRN_TCR	0x3DA	/* Timer Control Register */
+#define SPRN_SRR2	0x3DE	/* Save/Restore Register 2 */
+#define SPRN_SRR3	0x3DF	/* Save/Restore Register 3 */
+#define SPRN_DBSR	0x3F0	/* Debug Status Register */
+#define SPRN_DBCR0	0x3F2	/* Debug Control Register 0 */
+#define SPRN_DAC1	0x3F6	/* Data Address Compare 1 */
+#define SPRN_DAC2	0x3F7	/* Data Address Compare 2 */
+#define SPRN_CSRR0	SPRN_SRR2 /* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1	SPRN_SRR3 /* Critical Save and Restore Register 1 */
+#endif
+#define SPRN_HACOP	0x15F	/* Hypervisor Available Coprocessor Register */
+
+/* Bit definitions for CCR1. */
+#define	CCR1_DPC	0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
+#define	CCR1_TCS	0x00000080 /* Timer Clock Select */
+
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT		(1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT	8
+#define PWRMGTCR0_PW20_ENT		0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN		(1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT	16
+#define PWRMGTCR0_AV_IDLE_CNT		0x3F0000
+
+/* Bit definitions for the MCSR. */
+#define MCSR_MCS	0x80000000 /* Machine Check Summary */
+#define MCSR_IB		0x40000000 /* Instruction PLB Error */
+#define MCSR_DRB	0x20000000 /* Data Read PLB Error */
+#define MCSR_DWB	0x10000000 /* Data Write PLB Error */
+#define MCSR_TLBP	0x08000000 /* TLB Parity Error */
+#define MCSR_ICP	0x04000000 /* I-Cache Parity Error */
+#define MCSR_DCSP	0x02000000 /* D-Cache Search Parity Error */
+#define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
+#define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */
+
+#define PPC47x_MCSR_GPR	0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR	0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR	0x00400000 /* Imprecise Machine Check Exception */
+
+#ifdef CONFIG_E500
+/* All e500 */
+#define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
+#define MCSR_ICPERR 	0x40000000UL /* I-Cache Parity Error */
+
+/* e500v1/v2 */
+#define MCSR_DCP_PERR 	0x20000000UL /* D-Cache Push Parity Error */
+#define MCSR_DCPERR 	0x10000000UL /* D-Cache Parity Error */
+#define MCSR_BUS_IAERR 	0x00000080UL /* Instruction Address Error */
+#define MCSR_BUS_RAERR 	0x00000040UL /* Read Address Error */
+#define MCSR_BUS_WAERR 	0x00000020UL /* Write Address Error */
+#define MCSR_BUS_IBERR 	0x00000010UL /* Instruction Data Error */
+#define MCSR_BUS_RBERR 	0x00000008UL /* Read Data Bus Error */
+#define MCSR_BUS_WBERR 	0x00000004UL /* Write Data Bus Error */
+#define MCSR_BUS_IPERR	0x00000002UL /* Instruction Parity Error */
+#define MCSR_BUS_RPERR	0x00000001UL /* Read Parity Error */
+
+/* e500mc */
+#define MCSR_DCPERR_MC	0x20000000UL /* D-Cache Parity Error */
+#define MCSR_L2MMU_MHIT	0x08000000UL /* Hit on multiple TLB entries */
+#define MCSR_NMI	0x00100000UL /* Non-Maskable Interrupt */
+#define MCSR_MAV	0x00080000UL /* MCAR address valid */
+#define MCSR_MEA	0x00040000UL /* MCAR is effective address */
+#define MCSR_IF		0x00010000UL /* Instruction Fetch */
+#define MCSR_LD		0x00008000UL /* Load */
+#define MCSR_ST		0x00004000UL /* Store */
+#define MCSR_LDG	0x00002000UL /* Guarded Load */
+#define MCSR_TLBSYNC	0x00000002UL /* Multiple tlbsyncs detected */
+#define MCSR_BSL2_ERR	0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP	0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP	0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP	0x00000004 /* Protect MSR[PMM] */
+#endif
+
+#ifdef CONFIG_E200
+#define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
+#define MCSR_CP_PERR 	0x20000000UL /* Cache Push Parity Error */
+#define MCSR_CPERR 	0x10000000UL /* Cache Parity Error */
+#define MCSR_EXCP_ERR 	0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn
+					fetch for an exception handler */
+#define MCSR_BUS_IRERR 	0x00000010UL /* Read Bus Error on instruction fetch*/
+#define MCSR_BUS_DRERR 	0x00000008UL /* Read Bus Error on data load */
+#define MCSR_BUS_WRERR 	0x00000004UL /* Write Bus Error on buffered
+					store or cache line push */
+#endif
+
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000	/* PLL_CFG input pins */
+#define HID1_RFXE	0x00020000	/* Read fault exception enable */
+#define HID1_R1DPE	0x00008000	/* R1 data bus parity enable */
+#define HID1_R2DPE	0x00004000	/* R2 data bus parity enable */
+#define HID1_ASTME	0x00002000	/* Address bus streaming mode enable */
+#define HID1_ABE	0x00001000	/* Address broadcast enable */
+#define HID1_MPXTT	0x00000400	/* MPX re-map transfer type */
+#define HID1_ATS	0x00000080	/* Atomic status */
+#define HID1_MID_MASK	0x0000000f	/* MID input pins */
+#endif
+
+/* Bit definitions for the DBSR. */
+/*
+ * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define DBSR_IDE	0x80000000	/* Imprecise Debug Event */
+#define DBSR_MRR	0x30000000	/* Most Recent Reset */
+#define DBSR_IC		0x08000000	/* Instruction Completion */
+#define DBSR_BT		0x04000000	/* Branch Taken */
+#define DBSR_IRPT	0x02000000	/* Exception Debug Event */
+#define DBSR_TIE	0x01000000	/* Trap Instruction Event */
+#define DBSR_IAC1	0x00800000	/* Instr Address Compare 1 Event */
+#define DBSR_IAC2	0x00400000	/* Instr Address Compare 2 Event */
+#define DBSR_IAC3	0x00200000	/* Instr Address Compare 3 Event */
+#define DBSR_IAC4	0x00100000	/* Instr Address Compare 4 Event */
+#define DBSR_DAC1R	0x00080000	/* Data Addr Compare 1 Read Event */
+#define DBSR_DAC1W	0x00040000	/* Data Addr Compare 1 Write Event */
+#define DBSR_DAC2R	0x00020000	/* Data Addr Compare 2 Read Event */
+#define DBSR_DAC2W	0x00010000	/* Data Addr Compare 2 Write Event */
+#define DBSR_RET	0x00008000	/* Return Debug Event */
+#define DBSR_CIRPT	0x00000040	/* Critical Interrupt Taken Event */
+#define DBSR_CRET	0x00000020	/* Critical Return Debug Event */
+#define DBSR_IAC12ATS	0x00000002	/* Instr Address Compare 1/2 Toggle */
+#define DBSR_IAC34ATS	0x00000001	/* Instr Address Compare 3/4 Toggle */
+#endif
+#ifdef CONFIG_40x
+#define DBSR_IC		0x80000000	/* Instruction Completion */
+#define DBSR_BT		0x40000000	/* Branch taken */
+#define DBSR_IRPT	0x20000000	/* Exception Debug Event */
+#define DBSR_TIE	0x10000000	/* Trap Instruction debug Event */
+#define DBSR_IAC1	0x04000000	/* Instruction Address Compare 1 Event */
+#define DBSR_IAC2	0x02000000	/* Instruction Address Compare 2 Event */
+#define DBSR_IAC3	0x00080000	/* Instruction Address Compare 3 Event */
+#define DBSR_IAC4	0x00040000	/* Instruction Address Compare 4 Event */
+#define DBSR_DAC1R	0x01000000	/* Data Address Compare 1 Read Event */
+#define DBSR_DAC1W	0x00800000	/* Data Address Compare 1 Write Event */
+#define DBSR_DAC2R	0x00400000	/* Data Address Compare 2 Read Event */
+#define DBSR_DAC2W	0x00200000	/* Data Address Compare 2 Write Event */
+#endif
+
+/* Bit definitions related to the ESR. */
+#define ESR_MCI		0x80000000	/* Machine Check - Instruction */
+#define ESR_IMCP	0x80000000	/* Instr. Machine Check - Protection */
+#define ESR_IMCN	0x40000000	/* Instr. Machine Check - Non-config */
+#define ESR_IMCB	0x20000000	/* Instr. Machine Check - Bus error */
+#define ESR_IMCT	0x10000000	/* Instr. Machine Check - Timeout */
+#define ESR_PIL		0x08000000	/* Program Exception - Illegal */
+#define ESR_PPR		0x04000000	/* Program Exception - Privileged */
+#define ESR_PTR		0x02000000	/* Program Exception - Trap */
+#define ESR_FP		0x01000000	/* Floating Point Operation */
+#define ESR_DST		0x00800000	/* Storage Exception - Data miss */
+#define ESR_DIZ		0x00400000	/* Storage Exception - Zone fault */
+#define ESR_ST		0x00800000	/* Store Operation */
+#define ESR_DLK		0x00200000	/* Data Cache Locking */
+#define ESR_ILK		0x00100000	/* Instr. Cache Locking */
+#define ESR_PUO		0x00040000	/* Unimplemented Operation exception */
+#define ESR_BO		0x00020000	/* Byte Ordering */
+#define ESR_SPV		0x00000080	/* Signal Processing operation */
+
+/* Bit definitions related to the DBCR0. */
+#if defined(CONFIG_40x)
+#define DBCR0_EDM	0x80000000	/* External Debug Mode */
+#define DBCR0_IDM	0x40000000	/* Internal Debug Mode */
+#define DBCR0_RST	0x30000000	/* all the bits in the RST field */
+#define DBCR0_RST_SYSTEM 0x30000000	/* System Reset */
+#define DBCR0_RST_CHIP	0x20000000	/* Chip Reset */
+#define DBCR0_RST_CORE	0x10000000	/* Core Reset */
+#define DBCR0_RST_NONE	0x00000000	/* No Reset */
+#define DBCR0_IC	0x08000000	/* Instruction Completion */
+#define DBCR0_ICMP	DBCR0_IC
+#define DBCR0_BT	0x04000000	/* Branch Taken */
+#define DBCR0_BRT	DBCR0_BT
+#define DBCR0_EDE	0x02000000	/* Exception Debug Event */
+#define DBCR0_IRPT	DBCR0_EDE
+#define DBCR0_TDE	0x01000000	/* TRAP Debug Event */
+#define DBCR0_IA1	0x00800000	/* Instr Addr compare 1 enable */
+#define DBCR0_IAC1	DBCR0_IA1
+#define DBCR0_IA2	0x00400000	/* Instr Addr compare 2 enable */
+#define DBCR0_IAC2	DBCR0_IA2
+#define DBCR0_IA12	0x00200000	/* Instr Addr 1-2 range enable */
+#define DBCR0_IA12X	0x00100000	/* Instr Addr 1-2 range eXclusive */
+#define DBCR0_IA3	0x00080000	/* Instr Addr compare 3 enable */
+#define DBCR0_IAC3	DBCR0_IA3
+#define DBCR0_IA4	0x00040000	/* Instr Addr compare 4 enable */
+#define DBCR0_IAC4	DBCR0_IA4
+#define DBCR0_IA34	0x00020000	/* Instr Addr 3-4 range Enable */
+#define DBCR0_IA34X	0x00010000	/* Instr Addr 3-4 range eXclusive */
+#define DBCR0_IA12T	0x00008000	/* Instr Addr 1-2 range Toggle */
+#define DBCR0_IA34T	0x00004000	/* Instr Addr 3-4 range Toggle */
+#define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
+
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr0)
+#define DBCR_IAC12I	DBCR0_IA12			/* Range Inclusive */
+#define DBCR_IAC12X	(DBCR0_IA12 | DBCR0_IA12X)	/* Range Exclusive */
+#define DBCR_IAC12MODE	(DBCR0_IA12 | DBCR0_IA12X)	/* IAC 1-2 Mode Bits */
+#define DBCR_IAC34I	DBCR0_IA34			/* Range Inclusive */
+#define DBCR_IAC34X	(DBCR0_IA34 | DBCR0_IA34X)	/* Range Exclusive */
+#define DBCR_IAC34MODE	(DBCR0_IA34 | DBCR0_IA34X)	/* IAC 3-4 Mode Bits */
+
+/* Bit definitions related to the DBCR1. */
+#define DBCR1_DAC1R	0x80000000	/* DAC1 Read Debug Event */
+#define DBCR1_DAC2R	0x40000000	/* DAC2 Read Debug Event */
+#define DBCR1_DAC1W	0x20000000	/* DAC1 Write Debug Event */
+#define DBCR1_DAC2W	0x10000000	/* DAC2 Write Debug Event */
+
+#define dbcr_dac(task)	((task)->thread.debug.dbcr1)
+#define DBCR_DAC1R	DBCR1_DAC1R
+#define DBCR_DAC1W	DBCR1_DAC1W
+#define DBCR_DAC2R	DBCR1_DAC2R
+#define DBCR_DAC2W	DBCR1_DAC2W
+
+/*
+ * Are there any active Debug Events represented in the
+ * Debug Control Registers?
+ */
+#define DBCR0_ACTIVE_EVENTS	(DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \
+				 DBCR0_IAC3 | DBCR0_IAC4)
+#define DBCR1_ACTIVE_EVENTS	(DBCR1_DAC1R | DBCR1_DAC2R | \
+				 DBCR1_DAC1W | DBCR1_DAC2W)
+#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1)  (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \
+					   ((dbcr1) & DBCR1_ACTIVE_EVENTS))
+
+#elif defined(CONFIG_BOOKE)
+#define DBCR0_EDM	0x80000000	/* External Debug Mode */
+#define DBCR0_IDM	0x40000000	/* Internal Debug Mode */
+#define DBCR0_RST	0x30000000	/* all the bits in the RST field */
+/* DBCR0_RST_* is 44x specific and not followed in fsl booke */
+#define DBCR0_RST_SYSTEM 0x30000000	/* System Reset */
+#define DBCR0_RST_CHIP	0x20000000	/* Chip Reset */
+#define DBCR0_RST_CORE	0x10000000	/* Core Reset */
+#define DBCR0_RST_NONE	0x00000000	/* No Reset */
+#define DBCR0_ICMP	0x08000000	/* Instruction Completion */
+#define DBCR0_IC	DBCR0_ICMP
+#define DBCR0_BRT	0x04000000	/* Branch Taken */
+#define DBCR0_BT	DBCR0_BRT
+#define DBCR0_IRPT	0x02000000	/* Exception Debug Event */
+#define DBCR0_TDE	0x01000000	/* TRAP Debug Event */
+#define DBCR0_TIE	DBCR0_TDE
+#define DBCR0_IAC1	0x00800000	/* Instr Addr compare 1 enable */
+#define DBCR0_IAC2	0x00400000	/* Instr Addr compare 2 enable */
+#define DBCR0_IAC3	0x00200000	/* Instr Addr compare 3 enable */
+#define DBCR0_IAC4	0x00100000	/* Instr Addr compare 4 enable */
+#define DBCR0_DAC1R	0x00080000	/* DAC 1 Read enable */
+#define DBCR0_DAC1W	0x00040000	/* DAC 1 Write enable */
+#define DBCR0_DAC2R	0x00020000	/* DAC 2 Read enable */
+#define DBCR0_DAC2W	0x00010000	/* DAC 2 Write enable */
+#define DBCR0_RET	0x00008000	/* Return Debug Event */
+#define DBCR0_CIRPT	0x00000040	/* Critical Interrupt Taken Event */
+#define DBCR0_CRET	0x00000020	/* Critical Return Debug Event */
+#define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
+
+#define dbcr_dac(task)	((task)->thread.debug.dbcr0)
+#define DBCR_DAC1R	DBCR0_DAC1R
+#define DBCR_DAC1W	DBCR0_DAC1W
+#define DBCR_DAC2R	DBCR0_DAC2R
+#define DBCR_DAC2W	DBCR0_DAC2W
+
+/* Bit definitions related to the DBCR1. */
+#define DBCR1_IAC1US	0xC0000000	/* Instr Addr Cmp 1 Sup/User   */
+#define DBCR1_IAC1ER	0x30000000	/* Instr Addr Cmp 1 Eff/Real */
+#define DBCR1_IAC1ER_01	0x10000000	/* reserved */
+#define DBCR1_IAC1ER_10	0x20000000	/* Instr Addr Cmp 1 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC1ER_11	0x30000000	/* Instr Addr Cmp 1 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC2US	0x0C000000	/* Instr Addr Cmp 2 Sup/User   */
+#define DBCR1_IAC2ER	0x03000000	/* Instr Addr Cmp 2 Eff/Real */
+#define DBCR1_IAC2ER_01	0x01000000	/* reserved */
+#define DBCR1_IAC2ER_10	0x02000000	/* Instr Addr Cmp 2 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC2ER_11	0x03000000	/* Instr Addr Cmp 2 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC12M	0x00800000	/* Instr Addr 1-2 range enable */
+#define DBCR1_IAC12MX	0x00C00000	/* Instr Addr 1-2 range eXclusive */
+#define DBCR1_IAC12AT	0x00010000	/* Instr Addr 1-2 range Toggle */
+#define DBCR1_IAC3US	0x0000C000	/* Instr Addr Cmp 3 Sup/User   */
+#define DBCR1_IAC3ER	0x00003000	/* Instr Addr Cmp 3 Eff/Real */
+#define DBCR1_IAC3ER_01	0x00001000	/* reserved */
+#define DBCR1_IAC3ER_10	0x00002000	/* Instr Addr Cmp 3 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC3ER_11	0x00003000	/* Instr Addr Cmp 3 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC4US	0x00000C00	/* Instr Addr Cmp 4 Sup/User   */
+#define DBCR1_IAC4ER	0x00000300	/* Instr Addr Cmp 4 Eff/Real */
+#define DBCR1_IAC4ER_01	0x00000100	/* reserved */
+#define DBCR1_IAC4ER_10	0x00000200	/* Instr Addr Cmp 4 Eff/Real MSR[IS]=0 */
+#define DBCR1_IAC4ER_11	0x00000300	/* Instr Addr Cmp 4 Eff/Real MSR[IS]=1 */
+#define DBCR1_IAC34M	0x00000080	/* Instr Addr 3-4 range enable */
+#define DBCR1_IAC34MX	0x000000C0	/* Instr Addr 3-4 range eXclusive */
+#define DBCR1_IAC34AT	0x00000001	/* Instr Addr 3-4 range Toggle */
+
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr1)
+#define DBCR_IAC12I	DBCR1_IAC12M	/* Range Inclusive */
+#define DBCR_IAC12X	DBCR1_IAC12MX	/* Range Exclusive */
+#define DBCR_IAC12MODE	DBCR1_IAC12MX	/* IAC 1-2 Mode Bits */
+#define DBCR_IAC34I	DBCR1_IAC34M	/* Range Inclusive */
+#define DBCR_IAC34X	DBCR1_IAC34MX	/* Range Exclusive */
+#define DBCR_IAC34MODE	DBCR1_IAC34MX	/* IAC 3-4 Mode Bits */
+
+/* Bit definitions related to the DBCR2. */
+#define DBCR2_DAC1US	0xC0000000	/* Data Addr Cmp 1 Sup/User   */
+#define DBCR2_DAC1ER	0x30000000	/* Data Addr Cmp 1 Eff/Real */
+#define DBCR2_DAC2US	0x0C000000	/* Data Addr Cmp 2 Sup/User   */
+#define DBCR2_DAC2ER	0x03000000	/* Data Addr Cmp 2 Eff/Real */
+#define DBCR2_DAC12M	0x00800000	/* DAC 1-2 range enable */
+#define DBCR2_DAC12MM	0x00400000	/* DAC 1-2 Mask mode */
+#define DBCR2_DAC12MX	0x00C00000	/* DAC 1-2 range eXclusive */
+#define DBCR2_DAC12MODE	0x00C00000	/* DAC 1-2 Mode Bits */
+#define DBCR2_DAC12A	0x00200000	/* DAC 1-2 Asynchronous */
+#define DBCR2_DVC1M	0x000C0000	/* Data Value Comp 1 Mode */
+#define DBCR2_DVC1M_SHIFT	18	/* # of bits to shift DBCR2_DVC1M */
+#define DBCR2_DVC2M	0x00030000	/* Data Value Comp 2 Mode */
+#define DBCR2_DVC2M_SHIFT	16	/* # of bits to shift DBCR2_DVC2M */
+#define DBCR2_DVC1BE	0x00000F00	/* Data Value Comp 1 Byte */
+#define DBCR2_DVC1BE_SHIFT	8	/* # of bits to shift DBCR2_DVC1BE */
+#define DBCR2_DVC2BE	0x0000000F	/* Data Value Comp 2 Byte */
+#define DBCR2_DVC2BE_SHIFT	0	/* # of bits to shift DBCR2_DVC2BE */
+
+/*
+ * Are there any active Debug Events represented in the
+ * Debug Control Registers?
+ */
+#define DBCR0_ACTIVE_EVENTS  (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \
+			      DBCR0_IAC3 | DBCR0_IAC4 | DBCR0_DAC1R | \
+			      DBCR0_DAC1W  | DBCR0_DAC2R | DBCR0_DAC2W)
+#define DBCR1_ACTIVE_EVENTS	0
+
+#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1)  (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \
+					   ((dbcr1) & DBCR1_ACTIVE_EVENTS))
+#endif /* #elif defined(CONFIG_BOOKE) */
+
+/* Bit definitions related to the TCR. */
+#define TCR_WP(x)	(((x)&0x3)<<30)	/* WDT Period */
+#define TCR_WP_MASK	TCR_WP(3)
+#define WP_2_17		0		/* 2^17 clocks */
+#define WP_2_21		1		/* 2^21 clocks */
+#define WP_2_25		2		/* 2^25 clocks */
+#define WP_2_29		3		/* 2^29 clocks */
+#define TCR_WRC(x)	(((x)&0x3)<<28)	/* WDT Reset Control */
+#define TCR_WRC_MASK	TCR_WRC(3)
+#define WRC_NONE	0		/* No reset will occur */
+#define WRC_CORE	1		/* Core reset will occur */
+#define WRC_CHIP	2		/* Chip reset will occur */
+#define WRC_SYSTEM	3		/* System reset will occur */
+#define TCR_WIE		0x08000000	/* WDT Interrupt Enable */
+#define TCR_PIE		0x04000000	/* PIT Interrupt Enable */
+#define TCR_DIE		TCR_PIE		/* DEC Interrupt Enable */
+#define TCR_FP(x)	(((x)&0x3)<<24)	/* FIT Period */
+#define TCR_FP_MASK	TCR_FP(3)
+#define FP_2_9		0		/* 2^9 clocks */
+#define FP_2_13		1		/* 2^13 clocks */
+#define FP_2_17		2		/* 2^17 clocks */
+#define FP_2_21		3		/* 2^21 clocks */
+#define TCR_FIE		0x00800000	/* FIT Interrupt Enable */
+#define TCR_ARE		0x00400000	/* Auto Reload Enable */
+
+#ifdef CONFIG_E500
+#define TCR_GET_WP(tcr)  ((((tcr) & 0xC0000000) >> 30) | \
+			      (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr)  (((tcr) & 0xC0000000) >> 30)
+#endif
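+
+/*
+ * Illustrative sketch (not part of the original header): composing a
+ * watchdog setup from the field macros above.  mtspr()/mfspr() are
+ * assumed to come from <asm/reg.h>.
+ */
+#if 0	/* usage sketch only, not compiled */
+static inline void example_arm_booke_watchdog(void)
+{
+	/* 2^29-clock period, system reset on second timeout, IRQ enabled */
+	unsigned int tcr = TCR_WP(WP_2_29) | TCR_WRC(WRC_SYSTEM) | TCR_WIE;
+
+	mtspr(SPRN_TCR, tcr);
+
+	/* TCR_GET_WP(mfspr(SPRN_TCR)) would now return WP_2_29,
+	 * even on e500 where the period field is split. */
+}
+#endif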
+
+/* Bit definitions for the TSR. */
+#define TSR_ENW		0x80000000	/* Enable Next Watchdog */
+#define TSR_WIS		0x40000000	/* WDT Interrupt Status */
+#define TSR_WRS(x)	(((x)&0x3)<<28)	/* WDT Reset Status */
+#define WRS_NONE	0		/* No WDT reset occurred */
+#define WRS_CORE	1		/* WDT forced core reset */
+#define WRS_CHIP	2		/* WDT forced chip reset */
+#define WRS_SYSTEM	3		/* WDT forced system reset */
+#define TSR_PIS		0x08000000	/* PIT Interrupt Status */
+#define TSR_DIS		TSR_PIS		/* DEC Interrupt Status */
+#define TSR_FIS		0x04000000	/* FIT Interrupt Status */
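+
+/*
+ * Illustrative sketch (not part of the original header): TSR status
+ * bits are write-one-to-clear, so a watchdog "ping" writes back the
+ * bits it wants to acknowledge.  mtspr() is assumed from <asm/reg.h>.
+ */
+#if 0	/* usage sketch only, not compiled */
+static inline void example_booke_wdt_ping(void)
+{
+	/* Acknowledge the interrupt and re-arm the next watchdog period */
+	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS);
+}
+#endif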
+
+/* Bit definitions for the DCCR. */
+#define DCCR_NOCACHE	0		/* Noncacheable */
+#define DCCR_CACHE	1		/* Cacheable */
+
+/* Bit definitions for DCWR. */
+#define DCWR_COPY	0		/* Copy-back */
+#define DCWR_WRITE	1		/* Write-through */
+
+/* Bit definitions for ICCR. */
+#define ICCR_NOCACHE	0		/* Noncacheable */
+#define ICCR_CACHE	1		/* Cacheable */
+
+/* Bit definitions for L1CSR0. */
+#define L1CSR0_CPE	0x00010000	/* Data Cache Parity Enable */
+#define L1CSR0_CUL	0x00000400	/* Data Cache Unable to Lock */
+#define L1CSR0_CLFC	0x00000100	/* Cache Lock Bits Flash Clear */
+#define L1CSR0_DCFI	0x00000002	/* Data Cache Flash Invalidate */
+#define L1CSR0_CFI	0x00000002	/* Cache Flash Invalidate */
+#define L1CSR0_DCE	0x00000001	/* Data Cache Enable */
+
+/* Bit definitions for L1CSR1. */
+#define L1CSR1_CPE	0x00010000	/* Instruction Cache Parity Enable */
+#define L1CSR1_ICLFR	0x00000100	/* Instr Cache Lock Bits Flash Reset */
+#define L1CSR1_ICFI	0x00000002	/* Instr Cache Flash Invalidate */
+#define L1CSR1_ICE	0x00000001	/* Instr Cache Enable */
+
+/* Bit definitions for L1CSR2. */
+#define L1CSR2_DCWS	0x40000000	/* Data Cache write shadow */
+
+/* Bit definitions for BUCSR. */
+#define BUCSR_STAC_EN	0x01000000	/* Segment Target Address Cache */
+#define BUCSR_LS_EN	0x00400000	/* Link Stack */
+#define BUCSR_BBFI	0x00000200	/* Branch Buffer flash invalidate */
+#define BUCSR_BPEN	0x00000001	/* Branch prediction enable */
+#define BUCSR_INIT	(BUCSR_STAC_EN | BUCSR_LS_EN | BUCSR_BBFI | BUCSR_BPEN)
+
+/* Bit definitions for L2CSR0. */
+#define L2CSR0_L2E	0x80000000	/* L2 Cache Enable */
+#define L2CSR0_L2PE	0x40000000	/* L2 Cache Parity/ECC Enable */
+#define L2CSR0_L2WP	0x1c000000	/* L2 I/D Way Partitioning */
+#define L2CSR0_L2CM	0x03000000	/* L2 Cache Coherency Mode */
+#define L2CSR0_L2FI	0x00200000	/* L2 Cache Flash Invalidate */
+#define L2CSR0_L2IO	0x00100000	/* L2 Cache Instruction Only */
+#define L2CSR0_L2DO	0x00010000	/* L2 Cache Data Only */
+#define L2CSR0_L2REP	0x00003000	/* L2 Line Replacement Algo */
+#define L2CSR0_L2FL	0x00000800	/* L2 Cache Flush */
+#define L2CSR0_L2LFC	0x00000400	/* L2 Cache Lock Flash Clear */
+#define L2CSR0_L2LOA	0x00000080	/* L2 Cache Lock Overflow Allocate */
+#define L2CSR0_L2LO	0x00000020	/* L2 Cache Lock Overflow */
+
+/* Bit definitions for SGR. */
+#define SGR_NORMAL	0		/* Speculative fetching allowed. */
+#define SGR_GUARDED	1		/* Speculative fetching disallowed. */
+
+/* Bit definitions for EPCR */
+#define SPRN_EPCR_EXTGS		0x80000000	/* External Input interrupt
+						 * directed to Guest state */
+#define SPRN_EPCR_DTLBGS	0x40000000	/* Data TLB Error interrupt
+						 * directed to guest state */
+#define SPRN_EPCR_ITLBGS	0x20000000	/* Instr. TLB error interrupt
+						 * directed to guest state */
+#define SPRN_EPCR_DSIGS		0x10000000	/* Data Storage interrupt
+						 * directed to guest state */
+#define SPRN_EPCR_ISIGS		0x08000000	/* Instr. Storage interrupt
+						 * directed to guest state */
+#define SPRN_EPCR_DUVD		0x04000000	/* Disable Hypervisor Debug */
+#define SPRN_EPCR_ICM		0x02000000	/* Interrupt computation mode
+						 * (copied to MSR:CM on intr) */
+#define SPRN_EPCR_GICM		0x01000000	/* Guest Interrupt Comp. mode */
+#define SPRN_EPCR_DGTMI		0x00800000	/* Disable TLB Guest Management
+						 * instructions */
+#define SPRN_EPCR_DMIUH		0x00400000	/* Disable MAS Interrupt updates
+						 * for hypervisor */
+
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR		0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT	31
+#define EPC_EAS		0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT	30
+#define EPC_EGS		0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT	29
+#define EPC_ELPID	0x00ff0000
+#define EPC_ELPID_SHIFT	16
+#define EPC_EPID	0x00003fff
+#define EPC_EPID_SHIFT	0
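+
+/*
+ * Illustrative sketch (not part of the original header): composing an
+ * EPLC value for an external-PID load from user address space.  The
+ * pid argument is a placeholder; mtspr() is assumed from <asm/reg.h>.
+ */
+#if 0	/* usage sketch only, not compiled */
+static inline void example_set_eplc(unsigned int pid)
+{
+	unsigned int eplc = EPC_EPR |	/* user, not kernel */
+			    ((pid << EPC_EPID_SHIFT) & EPC_EPID);
+
+	mtspr(SPRN_EPLC, eplc);
+}
+#endif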
+
+/*
+ * The IBM-403 is an even odder special case, as it is much
+ * older than the IBM-405 series.  We put these down here in case someone
+ * wishes to support these machines again.
+ */
+#ifdef CONFIG_403GCX
+/* Special Purpose Registers (SPRNs) */
+#define SPRN_TBHU	0x3CC	/* Time Base High User-mode */
+#define SPRN_TBLU	0x3CD	/* Time Base Low User-mode */
+#define SPRN_CDBCR	0x3D7	/* Cache Debug Control Register */
+#define SPRN_TBHI	0x3DC	/* Time Base High */
+#define SPRN_TBLO	0x3DD	/* Time Base Low */
+#define SPRN_DBCR	0x3F2	/* Debug Control Register */
+#define SPRN_PBL1	0x3FC	/* Protection Bound Lower 1 */
+#define SPRN_PBL2	0x3FE	/* Protection Bound Lower 2 */
+#define SPRN_PBU1	0x3FD	/* Protection Bound Upper 1 */
+#define SPRN_PBU2	0x3FF	/* Protection Bound Upper 2 */
+
+/* Bit definitions for the DBCR. */
+#define DBCR_EDM	DBCR0_EDM
+#define DBCR_IDM	DBCR0_IDM
+#define DBCR_RST(x)	(((x) & 0x3) << 28)
+#define DBCR_RST_NONE	0
+#define DBCR_RST_CORE	1
+#define DBCR_RST_CHIP	2
+#define DBCR_RST_SYSTEM	3
+#define DBCR_IC		DBCR0_IC	/* Instruction Completion Debug Evnt */
+#define DBCR_BT		DBCR0_BT	/* Branch Taken Debug Event */
+#define DBCR_EDE	DBCR0_EDE	/* Exception Debug Event */
+#define DBCR_TDE	DBCR0_TDE	/* TRAP Debug Event */
+#define DBCR_FER	0x00F80000	/* First Events Remaining Mask */
+#define DBCR_FT		0x00040000	/* Freeze Timers on Debug Event */
+#define DBCR_IA1	0x00020000	/* Instr. Addr. Compare 1 Enable */
+#define DBCR_IA2	0x00010000	/* Instr. Addr. Compare 2 Enable */
+#define DBCR_D1R	0x00008000	/* Data Addr. Compare 1 Read Enable */
+#define DBCR_D1W	0x00004000	/* Data Addr. Compare 1 Write Enable */
+#define DBCR_D1S(x)	(((x) & 0x3) << 12)	/* Data Addr. Compare 1 Size */
+#define DAC_BYTE	0
+#define DAC_HALF	1
+#define DAC_WORD	2
+#define DAC_QUAD	3
+#define DBCR_D2R	0x00000800	/* Data Addr. Compare 2 Read Enable */
+#define DBCR_D2W	0x00000400	/* Data Addr. Compare 2 Write Enable */
+#define DBCR_D2S(x)	(((x) & 0x3) << 8)	/* Data Addr. Compare 2 Size */
+#define DBCR_SBT	0x00000040	/* Second Branch Taken Debug Event */
+#define DBCR_SED	0x00000020	/* Second Exception Debug Event */
+#define DBCR_STD	0x00000010	/* Second Trap Debug Event */
+#define DBCR_SIA	0x00000008	/* Second IAC Enable */
+#define DBCR_SDA	0x00000004	/* Second DAC Enable */
+#define DBCR_JOI	0x00000002	/* JTAG Serial Outbound Int. Enable */
+#define DBCR_JII	0x00000001	/* JTAG Serial Inbound Int. Enable */
+#endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR		830
+#define SPRN_USPCR		831
+#define SPRN_ISPCR		829
+#define SPRN_MMUBE0		820
+#define MMUBE0_IBE0_SHIFT	24
+#define MMUBE0_IBE1_SHIFT	16
+#define MMUBE0_IBE2_SHIFT	8
+#define MMUBE0_VBE0		0x00000004
+#define MMUBE0_VBE1		0x00000002
+#define MMUBE0_VBE2		0x00000001
+#define SPRN_MMUBE1		821
+#define MMUBE1_IBE3_SHIFT	24
+#define MMUBE1_IBE4_SHIFT	16
+#define MMUBE1_IBE5_SHIFT	8
+#define MMUBE1_VBE3		0x00000004
+#define MMUBE1_VBE4		0x00000002
+#define MMUBE1_VBE5		0x00000001
+
+#define TMRN_TMCFG0      16	/* Thread Management Configuration Register 0 */
+#define TMRN_TMCFG0_NPRIBITS       0x003f0000 /* Bits of thread priority */
+#define TMRN_TMCFG0_NPRIBITS_SHIFT 16
+#define TMRN_TMCFG0_NATHRD         0x00003f00 /* Number of active threads */
+#define TMRN_TMCFG0_NATHRD_SHIFT   8
+#define TMRN_TMCFG0_NTHRD          0x0000003f /* Number of threads */
+#define TMRN_IMSR0	0x120	/* Initial MSR Register 0 (e6500) */
+#define TMRN_IMSR1	0x121	/* Initial MSR Register 1 (e6500) */
+#define TMRN_INIA0	0x140	/* Next Instruction Address Register 0 */
+#define TMRN_INIA1	0x141	/* Next Instruction Address Register 1 */
+#define SPRN_TENSR	0x1b5	/* Thread Enable Status Register */
+#define SPRN_TENS	0x1b6	/* Thread Enable Set Register */
+#define SPRN_TENC	0x1b7	/* Thread Enable Clear Register */
+
+#define TEN_THREAD(x)	(1 << (x))
+
+#ifndef __ASSEMBLY__
+#define mftmr(rn)	({unsigned long rval; \
+			asm volatile(MFTMR(rn, %0) : "=r" (rval)); rval;})
+#define mttmr(rn, v)	asm volatile(MTTMR(rn, %0) : \
+				     : "r" ((unsigned long)(v)) \
+				     : "memory")
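+
+/*
+ * Illustrative sketch (not part of the original header): reading the
+ * thread count out of TMCFG0 with the accessor above.  The MFTMR()
+ * opcode macro is assumed to be provided elsewhere.
+ */
+#if 0	/* usage sketch only, not compiled */
+static inline unsigned int example_num_threads(void)
+{
+	return mftmr(TMRN_TMCFG0) & TMRN_TMCFG0_NTHRD;
+}
+#endif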
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
new file mode 100644
index 0000000..a21f529
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Contains register definitions for the Freescale Embedded Performance
+ * Monitor.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
+#define __ASM_POWERPC_REG_FSL_EMB_H__
+
+#include <linux/stringify.h>
+
+#ifndef __ASSEMBLY__
+/* Performance Monitor Registers */
+#define mfpmr(rn)	({unsigned int rval; \
+			asm volatile("mfpmr %0," __stringify(rn) \
+				     : "=r" (rval)); rval;})
+#define mtpmr(rn, v)	asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
+#endif /* __ASSEMBLY__ */
+
+/* Freescale Book E Performance Monitor APU Registers */
+#define PMRN_PMC0	0x010	/* Performance Monitor Counter 0 */
+#define PMRN_PMC1	0x011	/* Performance Monitor Counter 1 */
+#define PMRN_PMC2	0x012	/* Performance Monitor Counter 2 */
+#define PMRN_PMC3	0x013	/* Performance Monitor Counter 3 */
+#define PMRN_PMC4	0x014	/* Performance Monitor Counter 4 */
+#define PMRN_PMC5	0x015	/* Performance Monitor Counter 5 */
+#define PMRN_PMLCA0	0x090	/* PM Local Control A0 */
+#define PMRN_PMLCA1	0x091	/* PM Local Control A1 */
+#define PMRN_PMLCA2	0x092	/* PM Local Control A2 */
+#define PMRN_PMLCA3	0x093	/* PM Local Control A3 */
+#define PMRN_PMLCA4	0x094	/* PM Local Control A4 */
+#define PMRN_PMLCA5	0x095	/* PM Local Control A5 */
+
+#define PMLCA_FC	0x80000000	/* Freeze Counter */
+#define PMLCA_FCS	0x40000000	/* Freeze in Supervisor */
+#define PMLCA_FCU	0x20000000	/* Freeze in User */
+#define PMLCA_FCM1	0x10000000	/* Freeze when PMM==1 */
+#define PMLCA_FCM0	0x08000000	/* Freeze when PMM==0 */
+#define PMLCA_CE	0x04000000	/* Condition Enable */
+#define PMLCA_FGCS1	0x00000002	/* Freeze in guest state */
+#define PMLCA_FGCS0	0x00000001	/* Freeze in hypervisor state */
+
+#define PMLCA_EVENT_MASK 0x01ff0000	/* Event field */
+#define PMLCA_EVENT_SHIFT	16
+
+#define PMRN_PMLCB0	0x110	/* PM Local Control B0 */
+#define PMRN_PMLCB1	0x111	/* PM Local Control B1 */
+#define PMRN_PMLCB2	0x112	/* PM Local Control B2 */
+#define PMRN_PMLCB3	0x113	/* PM Local Control B3 */
+#define PMRN_PMLCB4	0x114	/* PM Local Control B4 */
+#define PMRN_PMLCB5	0x115	/* PM Local Control B5 */
+
+#define PMLCB_THRESHMUL_MASK	0x0700	/* Threshold Multiple Field */
+#define PMLCB_THRESHMUL_SHIFT	8
+
+#define PMLCB_THRESHOLD_MASK	0x003f	/* Threshold Field */
+#define PMLCB_THRESHOLD_SHIFT	0
+
+#define PMRN_PMGC0	0x190	/* PM Global Control 0 */
+
+#define PMGC0_FAC	0x80000000	/* Freeze all Counters */
+#define PMGC0_PMIE	0x40000000	/* Interrupt Enable */
+#define PMGC0_FCECE	0x20000000	/* Freeze Counters on
+					   Enabled Condition or
+					   Event */
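+
+/*
+ * Illustrative sketch (not part of the original header): programming
+ * one counter with the accessors and bits above.  The event number is
+ * an arbitrary placeholder.
+ */
+#if 0	/* usage sketch only, not compiled */
+static inline void example_start_pmc0(unsigned int event)
+{
+	/* Freeze all counters while reprogramming */
+	mtpmr(PMRN_PMGC0, PMGC0_FAC);
+
+	/* Select the event; PMLCA_FCS means we count user mode only */
+	mtpmr(PMRN_PMLCA0, PMLCA_FCS |
+	      ((event << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK));
+	mtpmr(PMRN_PMC0, 0);
+
+	/* Unfreeze and enable the overflow interrupt */
+	mtpmr(PMRN_PMGC0, PMGC0_PMIE);
+}
+#endif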
+
+#define PMRN_UPMC0	0x000	/* User Performance Monitor Counter 0 */
+#define PMRN_UPMC1	0x001	/* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2	0x002	/* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3	0x003	/* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4	0x004	/* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5	0x005	/* User Performance Monitor Counter 5 */
+#define PMRN_UPMLCA0	0x080	/* User PM Local Control A0 */
+#define PMRN_UPMLCA1	0x081	/* User PM Local Control A1 */
+#define PMRN_UPMLCA2	0x082	/* User PM Local Control A2 */
+#define PMRN_UPMLCA3	0x083	/* User PM Local Control A3 */
+#define PMRN_UPMLCA4	0x084	/* User PM Local Control A4 */
+#define PMRN_UPMLCA5	0x085	/* User PM Local Control A5 */
+#define PMRN_UPMLCB0	0x100	/* User PM Local Control B0 */
+#define PMRN_UPMLCB1	0x101	/* User PM Local Control B1 */
+#define PMRN_UPMLCB2	0x102	/* User PM Local Control B2 */
+#define PMRN_UPMLCB3	0x103	/* User PM Local Control B3 */
+#define PMRN_UPMLCB4	0x104	/* User PM Local Control B4 */
+#define PMRN_UPMLCB5	0x105	/* User PM Local Control B5 */
+#define PMRN_UPMGC0	0x180	/* User PM Global Control 0 */
+
+#endif /* __ASM_POWERPC_REG_FSL_EMB_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
new file mode 100644
index 0000000..8e83703
--- /dev/null
+++ b/arch/powerpc/include/asm/rheap.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-ppc/rheap.h
+ *
+ * Header file for the implementation of a remote heap.
+ *
+ * Author: Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_PPC_RHEAP_H__
+#define __ASM_PPC_RHEAP_H__
+
+#include <linux/list.h>
+
+typedef struct _rh_block {
+	struct list_head list;
+	unsigned long start;
+	int size;
+	const char *owner;
+} rh_block_t;
+
+typedef struct _rh_info {
+	unsigned int alignment;
+	int max_blocks;
+	int empty_slots;
+	rh_block_t *block;
+	struct list_head empty_list;
+	struct list_head free_list;
+	struct list_head taken_list;
+	unsigned int flags;
+} rh_info_t;
+
+#define RHIF_STATIC_INFO	0x1
+#define RHIF_STATIC_BLOCK	0x2
+
+typedef struct _rh_stats {
+	unsigned long start;
+	int size;
+	const char *owner;
+} rh_stats_t;
+
+#define RHGS_FREE	0
+#define RHGS_TAKEN	1
+
+/* Create a remote heap dynamically */
+extern rh_info_t *rh_create(unsigned int alignment);
+
+/* Destroy a remote heap, created by rh_create() */
+extern void rh_destroy(rh_info_t * info);
+
+/* Initialize in place a remote info block */
+extern void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
+		    rh_block_t * block);
+
+/* Attach a free region to manage */
+extern int rh_attach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Detach a free region */
+extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Allocate the given size from the remote heap (with alignment) */
+extern unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment,
+		const char *owner);
+
+/* Allocate the given size from the remote heap */
+extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner);
+
+/* Allocate the given size from the given address */
+extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size,
+			    const char *owner);
+
+/* Free the allocated area */
+extern int rh_free(rh_info_t * info, unsigned long start);
+
+/* Get stats for debugging purposes */
+extern int rh_get_stats(rh_info_t * info, int what, int max_stats,
+			rh_stats_t * stats);
+
+/* Simple dump of remote heap info */
+extern void rh_dump(rh_info_t * info);
+
+/* Simple dump of remote info block */
+void rh_dump_blk(rh_info_t *info, rh_block_t *blk);
+
+/* Set owner of taken block */
+extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);
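+
+/*
+ * Illustrative sketch (not part of the original header): the typical
+ * create/attach/alloc/free life cycle.  The base offset and sizes are
+ * placeholders.
+ */
+#if 0	/* usage sketch only, not compiled */
+static void example_rheap_usage(void)
+{
+	rh_info_t *info = rh_create(8);		/* 8-byte alignment */
+	unsigned long offset;
+
+	rh_attach_region(info, 0x1000, 0x4000);	/* manage 16KiB at 0x1000 */
+	offset = rh_alloc(info, 256, "example");
+	/* ... use the block at 'offset' ... */
+	rh_free(info, offset);
+	rh_destroy(info);
+}
+#endif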
+
+#endif				/* __ASM_PPC_RHEAP_H__ */
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
new file mode 100644
index 0000000..ec800f2
--- /dev/null
+++ b/arch/powerpc/include/asm/rio.h
@@ -0,0 +1,22 @@
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+#ifdef CONFIG_FSL_RIO
+extern int fsl_rio_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; }
+#endif
+
+#endif				/* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
new file mode 100644
index 0000000..71e393c
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas.h
@@ -0,0 +1,475 @@
+#ifndef _POWERPC_RTAS_H
+#define _POWERPC_RTAS_H
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+#include <asm/page.h>
+#include <linux/time.h>
+
+/*
+ * Definitions for talking to the RTAS on CHRP machines.
+ *
+ * Copyright (C) 2001 Peter Bergner
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define RTAS_UNKNOWN_SERVICE (-1)
+#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
+
+/* Buffer size for ppc_rtas system call. */
+#define RTAS_RMOBUF_MAX (64 * 1024)
+
+/* RTAS return status codes */
+#define RTAS_NOT_SUSPENDABLE	-9004
+#define RTAS_BUSY		-2    /* RTAS Busy */
+#define RTAS_EXTENDED_DELAY_MIN	9900
+#define RTAS_EXTENDED_DELAY_MAX	9905
+
+/*
+ * In general, to call RTAS use rtas_token("string") to look up
+ * an RTAS token for the given string (e.g. "event-scan").
+ * To actually perform the call use
+ *    ret = rtas_call(token, n_in, n_out, ...)
+ * where n_in is the number of input parameters and
+ *       n_out is the number of output parameters.
+ *
+ * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE
+ * will be returned as a token.  rtas_call() checks for this
+ * token and errors out gracefully, so rtas_call(rtas_token("str"), ...)
+ * may be safely used for one-shot calls to RTAS.
+ */
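+
+/*
+ * Illustrative sketch (not part of the original header): a one-shot
+ * call following the convention above, with the usual busy-status
+ * retry loop.  The "get-time-of-day" service and its 0-input/8-output
+ * signature come from PAPR, not from this header; rtas_call(),
+ * rtas_token() and rtas_busy_delay() are declared further down.
+ */
+#if 0	/* usage sketch only, not compiled */
+static int example_get_time_of_day(int rets[8])
+{
+	int rc;
+
+	do {
+		rc = rtas_call(rtas_token("get-time-of-day"), 0, 8, rets);
+	} while (rtas_busy_delay(rc));	/* handles RTAS_BUSY/extended delay */
+
+	return rc;	/* 0 on success, negative RTAS status otherwise */
+}
+#endif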
+
+typedef __be32 rtas_arg_t;
+
+struct rtas_args {
+	__be32 token;
+	__be32 nargs;
+	__be32 nret;
+	rtas_arg_t args[16];
+	rtas_arg_t *rets;     /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+	unsigned long entry;		/* physical address pointer */
+	unsigned long base;		/* physical address pointer */
+	unsigned long size;
+	arch_spinlock_t lock;
+	struct rtas_args args;
+	struct device_node *dev;	/* virtual address pointer */
+};
+
+struct rtas_suspend_me_data {
+	atomic_t working; /* number of cpus accessing this struct */
+	atomic_t done;
+	int token; /* ibm,suspend-me */
+	atomic_t error;
+	struct completion *complete; /* wait on this until working == 0 */
+};
+
+/* RTAS event classes */
+#define RTAS_INTERNAL_ERROR		0x80000000 /* set bit 0 */
+#define RTAS_EPOW_WARNING		0x40000000 /* set bit 1 */
+#define RTAS_HOTPLUG_EVENTS		0x10000000 /* set bit 3 */
+#define RTAS_IO_EVENTS			0x08000000 /* set bit 4 */
+#define RTAS_EVENT_SCAN_ALL_EVENTS	0xffffffff
+
+/* RTAS event severity */
+#define RTAS_SEVERITY_FATAL		0x5
+#define RTAS_SEVERITY_ERROR		0x4
+#define RTAS_SEVERITY_ERROR_SYNC	0x3
+#define RTAS_SEVERITY_WARNING		0x2
+#define RTAS_SEVERITY_EVENT		0x1
+#define RTAS_SEVERITY_NO_ERROR		0x0
+
+/* RTAS event disposition */
+#define RTAS_DISP_FULLY_RECOVERED	0x0
+#define RTAS_DISP_LIMITED_RECOVERY	0x1
+#define RTAS_DISP_NOT_RECOVERED		0x2
+
+/* RTAS event initiator */
+#define RTAS_INITIATOR_UNKNOWN		0x0
+#define RTAS_INITIATOR_CPU		0x1
+#define RTAS_INITIATOR_PCI		0x2
+#define RTAS_INITIATOR_ISA		0x3
+#define RTAS_INITIATOR_MEMORY		0x4
+#define RTAS_INITIATOR_POWERMGM		0x5
+
+/* RTAS event target */
+#define RTAS_TARGET_UNKNOWN		0x0
+#define RTAS_TARGET_CPU			0x1
+#define RTAS_TARGET_PCI			0x2
+#define RTAS_TARGET_ISA			0x3
+#define RTAS_TARGET_MEMORY		0x4
+#define RTAS_TARGET_POWERMGM		0x5
+
+/* RTAS event type */
+#define RTAS_TYPE_RETRY			0x01
+#define RTAS_TYPE_TCE_ERR		0x02
+#define RTAS_TYPE_INTERN_DEV_FAIL	0x03
+#define RTAS_TYPE_TIMEOUT		0x04
+#define RTAS_TYPE_DATA_PARITY		0x05
+#define RTAS_TYPE_ADDR_PARITY		0x06
+#define RTAS_TYPE_CACHE_PARITY		0x07
+#define RTAS_TYPE_ADDR_INVALID		0x08
+#define RTAS_TYPE_ECC_UNCORR		0x09
+#define RTAS_TYPE_ECC_CORR		0x0a
+#define RTAS_TYPE_EPOW			0x40
+#define RTAS_TYPE_PLATFORM		0xE0
+#define RTAS_TYPE_IO			0xE1
+#define RTAS_TYPE_INFO			0xE2
+#define RTAS_TYPE_DEALLOC		0xE3
+#define RTAS_TYPE_DUMP			0xE4
+/* I don't add PowerMGM events right now; this is a different topic */
+#define RTAS_TYPE_PMGM_POWER_SW_ON	0x60
+#define RTAS_TYPE_PMGM_POWER_SW_OFF	0x61
+#define RTAS_TYPE_PMGM_LID_OPEN		0x62
+#define RTAS_TYPE_PMGM_LID_CLOSE	0x63
+#define RTAS_TYPE_PMGM_SLEEP_BTN	0x64
+#define RTAS_TYPE_PMGM_WAKE_BTN		0x65
+#define RTAS_TYPE_PMGM_BATTERY_WARN	0x66
+#define RTAS_TYPE_PMGM_BATTERY_CRIT	0x67
+#define RTAS_TYPE_PMGM_SWITCH_TO_BAT	0x68
+#define RTAS_TYPE_PMGM_SWITCH_TO_AC	0x69
+#define RTAS_TYPE_PMGM_KBD_OR_MOUSE	0x6a
+#define RTAS_TYPE_PMGM_ENCLOS_OPEN	0x6b
+#define RTAS_TYPE_PMGM_ENCLOS_CLOSED	0x6c
+#define RTAS_TYPE_PMGM_RING_INDICATE	0x6d
+#define RTAS_TYPE_PMGM_LAN_ATTENTION	0x6e
+#define RTAS_TYPE_PMGM_TIME_ALARM	0x6f
+#define RTAS_TYPE_PMGM_CONFIG_CHANGE	0x70
+#define RTAS_TYPE_PMGM_SERVICE_PROC	0x71
+/* Platform Resource Reassignment Notification */
+#define RTAS_TYPE_PRRN			0xA0
+
+/* RTAS check-exception vector offset */
+#define RTAS_VECTOR_EXTERNAL_INTERRUPT	0x500
+
+struct rtas_error_log {
+	/* Byte 0 */
+	uint8_t		byte0;			/* Architectural version */
+
+	/* Byte 1 */
+	uint8_t		byte1;
+	/* XXXXXXXX
+	 * XXX		3: Severity level of error
+	 *    XX	2: Degree of recovery
+	 *      X	1: Extended log present?
+	 *       XX	2: Reserved
+	 */
+
+	/* Byte 2 */
+	uint8_t		byte2;
+	/* XXXXXXXX
+	 * XXXX		4: Initiator of event
+	 *     XXXX	4: Target of failed operation
+	 */
+	uint8_t		byte3;			/* General event or error*/
+	__be32		extended_log_length;	/* length in bytes */
+	unsigned char	buffer[1];		/* Start of extended log */
+						/* Variable length.      */
+};
+
+static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
+{
+	return (elog->byte1 & 0xE0) >> 5;
+}
+
+static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog)
+{
+	return (elog->byte1 & 0x18) >> 3;
+}
+
+static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog)
+{
+	return (elog->byte1 & 0x04) >> 2;
+}
+
+#define rtas_error_type(x)	((x)->byte3)
+
+static inline
+uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
+{
+	return be32_to_cpu(elog->extended_log_length);
+}
+
+#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG	14
+
+#define RTAS_V6EXT_COMPANY_ID_IBM	(('I' << 24) | ('B' << 16) | ('M' << 8))
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * from "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+	/* Byte 0 */
+	uint8_t byte0;
+	/* XXXXXXXX
+	 * X		1: Log valid
+	 *  X		1: Unrecoverable error
+	 *   X		1: Recoverable (correctable or successfully retried)
+	 *    X		1: Bypassed unrecoverable error (degraded operation)
+	 *     X	1: Predictive error
+	 *      X	1: "New" log (always 1 for data returned from RTAS)
+	 *       X	1: Big Endian
+	 *        X	1: Reserved
+	 */
+
+	/* Byte 1 */
+	uint8_t byte1;			/* reserved */
+
+	/* Byte 2 */
+	uint8_t byte2;
+	/* XXXXXXXX
+	 * X		1: Set to 1 (indicating log is in PowerPC format)
+	 *  XXX		3: Reserved
+	 *     XXXX	4: Log format used for bytes 12-2047
+	 */
+
+	/* Byte 3 */
+	uint8_t byte3;			/* reserved */
+	/* Byte 4-11 */
+	uint8_t reserved[8];		/* reserved */
+	/* Byte 12-15 */
+	__be32  company_id;		/* Company ID of the company	*/
+					/* that defines the format for	*/
+					/* the vendor specific log type	*/
+	/* Byte 16-end of log */
+	uint8_t vendor_log[1];		/* Start of vendor specific log	*/
+					/* Variable length.		*/
+};
+
+static
+inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
+{
+	return ext_log->byte2 & 0x0F;
+}
+
+static
+inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
+{
+	return be32_to_cpu(ext_log->company_id);
+}
+
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR		(('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR		(('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC	(('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH	(('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS	(('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC	(('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR	(('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR		(('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID	(('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID	(('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID		(('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW		(('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT		(('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO	(('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME		(('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF		(('U' << 8) | 'D')
+#define PSERIES_ELOG_SECT_ID_HOTPLUG		(('H' << 8) | 'P')
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+	__be16 id;			/* 0x00 2-byte ASCII section ID	*/
+	__be16 length;			/* 0x02 Section length in bytes	*/
+	uint8_t version;		/* 0x04 Section version		*/
+	uint8_t subtype;		/* 0x05 Section subtype		*/
+	__be16 creator_component;	/* 0x06 Creator component ID	*/
+	uint8_t data[];			/* 0x08 Start of section data	*/
+};
+
+static
+inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
+{
+	return be16_to_cpu(sect->id);
+}
+
+static
+inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
+{
+	return be16_to_cpu(sect->length);
+}
+
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+	u8	resource;
+	u8	action;
+	u8	id_type;
+	u8	reserved;
+	union {
+		__be32	drc_index;
+		__be32	drc_count;
+		struct { __be32 count, index; } ic;
+		char	drc_name[1];
+	} _drc_u;
+};
+
+#define PSERIES_HP_ELOG_RESOURCE_CPU	1
+#define PSERIES_HP_ELOG_RESOURCE_MEM	2
+#define PSERIES_HP_ELOG_RESOURCE_SLOT	3
+#define PSERIES_HP_ELOG_RESOURCE_PHB	4
+
+#define PSERIES_HP_ELOG_ACTION_ADD	1
+#define PSERIES_HP_ELOG_ACTION_REMOVE	2
+#define PSERIES_HP_ELOG_ACTION_READD	3
+
+#define PSERIES_HP_ELOG_ID_DRC_NAME	1
+#define PSERIES_HP_ELOG_ID_DRC_INDEX	2
+#define PSERIES_HP_ELOG_ID_DRC_COUNT	3
+#define PSERIES_HP_ELOG_ID_DRC_IC	4
+
+struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+					      uint16_t section_id);
+
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
+ */
+extern void (*rtas_flash_term_hook)(int);
+
+extern struct rtas_t rtas;
+
+extern int rtas_token(const char *service);
+extern int rtas_service_present(const char *service);
+extern int rtas_call(int token, int, int, int *, ...);
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
+			int nret, ...);
+extern void __noreturn rtas_restart(char *cmd);
+extern void rtas_power_off(void);
+extern void __noreturn rtas_halt(void);
+extern void rtas_os_term(char *str);
+extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+extern int rtas_get_power_level(int powerdomain, int *level);
+extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern bool rtas_indicator_present(int token, int *maxindex);
+extern int rtas_set_indicator(int indicator, int index, int new_value);
+extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
+extern void rtas_progress(char *s, unsigned short hex);
+extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+extern int rtas_ibm_suspend_me(u64 handle);
+
+struct rtc_time;
+extern time64_t rtas_get_boot_time(void);
+extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
+extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
+
+extern unsigned int rtas_busy_delay_time(int status);
+extern unsigned int rtas_busy_delay(int status);
+
+extern int early_init_dt_scan_rtas(unsigned long node,
+		const char *uname, int depth, void *data);
+
+extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+
+#ifdef CONFIG_PPC_PSERIES
+extern time64_t last_rtas_event;
+extern int clobbering_unread_rtas_event(void);
+extern int pseries_devicetree_update(s32 scope);
+extern void post_mobility_fixup(void);
+#else
+static inline int clobbering_unread_rtas_event(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PPC_RTAS_DAEMON
+extern void rtas_cancel_event_scan(void);
+#else
+static inline void rtas_cancel_event_scan(void) { }
+#endif
+
+/* Error types logged.  */
+#define ERR_FLAG_ALREADY_LOGGED	0x0
+#define ERR_FLAG_BOOT		0x1 	/* log was pulled from NVRAM on boot */
+#define ERR_TYPE_RTAS_LOG	0x2	/* from rtas event-scan */
+#define ERR_TYPE_KERNEL_PANIC	0x4	/* from die()/panic() */
+#define ERR_TYPE_KERNEL_PANIC_GZ 0x8	/* ditto, compressed */
+
+/* All the types and not flags */
+#define ERR_TYPE_MASK \
+	(ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC | ERR_TYPE_KERNEL_PANIC_GZ)
+
+#define RTAS_DEBUG KERN_DEBUG "RTAS: "
+
+#define RTAS_ERROR_LOG_MAX 2048
+
+/*
+ * Return the firmware-specified size of the error log buffer
+ *  for all rtas calls that require an error buffer argument.
+ *  This includes 'check-exception' and 'rtas-last-error'.
+ */
+extern int rtas_get_error_log_max(void);
+
+/* Event Scan Parameters */
+#define EVENT_SCAN_ALL_EVENTS	0xf0000000
+#define SURVEILLANCE_TOKEN	9000
+#define LOG_NUMBER		64		/* must be a power of two */
+#define LOG_NUMBER_MASK		(LOG_NUMBER-1)
+
+/* Some RTAS ops require a data buffer and that buffer must be < 4G.
+ * Rather than having a memory allocator, just use this buffer
+ * (get the lock first), make the RTAS call, and copy the data out
+ * instead of holding the buffer for long.
+ */
+
+#define RTAS_DATA_BUF_SIZE 4096
+extern spinlock_t rtas_data_buf_lock;
+extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
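+
+/*
+ * Illustrative sketch (not part of the original header): the canonical
+ * pattern for an RTAS call that takes a work-area argument.  The
+ * two-input/one-output layout is a placeholder; memset()/memcpy() are
+ * assumed from <linux/string.h> and __pa() from <asm/page.h>.
+ */
+#if 0	/* usage sketch only, not compiled */
+static int example_buffered_rtas_call(int token, char *out, size_t len)
+{
+	int rc;
+
+	if (len > RTAS_DATA_BUF_SIZE)
+		len = RTAS_DATA_BUF_SIZE;
+
+	spin_lock(&rtas_data_buf_lock);
+	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
+	rc = rtas_call(token, 2, 1, NULL, __pa(rtas_data_buf), len);
+	/* Copy the result out rather than holding the buffer */
+	memcpy(out, rtas_data_buf, len);
+	spin_unlock(&rtas_data_buf_lock);
+
+	return rc;
+}
+#endif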
+
+/* RMO buffer reserved for user-space RTAS use */
+extern unsigned long rtas_rmo_buf;
+
+#define GLOBAL_INTERRUPT_QUEUE 9005
+
+/**
+ * rtas_config_addr - Format a busno, devfn and reg for RTAS.
+ * @busno: The bus number.
+ * @devfn: The device and function number as encoded by PCI_DEVFN().
+ * @reg: The register number.
+ *
+ * This function encodes the given busno, devfn and register number as
+ * required for RTAS calls that take a "config_addr" parameter.
+ * See PAPR requirement 7.3.4-1 for more info.
+ */
+static inline u32 rtas_config_addr(int busno, int devfn, int reg)
+{
+	return ((reg & 0xf00) << 20) | ((busno & 0xff) << 16) |
+			(devfn << 8) | (reg & 0xff);
+}
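+
+/*
+ * For example (illustrative values only): bus 0x40, device 3 function 0,
+ * extended config register 0x10c encodes as
+ *   rtas_config_addr(0x40, PCI_DEVFN(3, 0), 0x10c)
+ *     == (0x100 << 20) | (0x40 << 16) | (PCI_DEVFN(3, 0) << 8) | 0x0c
+ */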
+
+extern void rtas_give_timebase(void);
+extern void rtas_take_timebase(void);
+
+#ifdef CONFIG_PPC_RTAS
+static inline int page_is_rtas_user_buf(unsigned long pfn)
+{
+	unsigned long paddr = (pfn << PAGE_SHIFT);
+	if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+		return 1;
+	return 0;
+}
+
+/* Not the best place to put pSeries_coalesce_init; this will be fixed
+ * when we move some of the rtas suspend-me stuff to pseries */
+extern void pSeries_coalesce_init(void);
+void rtas_initialize(void);
+#else
+static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0; }
+static inline void pSeries_coalesce_init(void) { }
+static inline void rtas_initialize(void) { }
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
new file mode 100644
index 0000000..cfb390e
--- /dev/null
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_RUNLATCH_H
+#define _ASM_POWERPC_RUNLATCH_H
+
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable/disable.  This is called in the idle loop,
+ * and we don't want to interfere with soft-disable/enable and
+ * interrupt replay.
+ */
+#define ppc64_runlatch_off()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_off();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}						\
+	} while (0)
+
+#define ppc64_runlatch_on()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_on();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}      						\
+	} while (0)
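+
+/*
+ * A minimal sketch of the intended use in an idle loop (illustrative
+ * only, not taken from the actual idle code):
+ *
+ *	ppc64_runlatch_off();
+ *	while (!need_resched())
+ *		cpu_relax();
+ *	ppc64_runlatch_on();
+ */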
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_RUNLATCH_H */
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
new file mode 100644
index 0000000..f5cde45
--- /dev/null
+++ b/arch/powerpc/include/asm/scom.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
+ *                <benh@kernel.crashing.org>
+ *     and        David Gibson, IBM Corporation.
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_SCOM_H
+#define _ASM_POWERPC_SCOM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_SCOM
+
+/*
+ * The SCOM bus is a sideband bus used for accessing various internal
+ * registers of the processor or the chipset. The implementation details
+ * differ between processors and platforms, and the access method as
+ * well.
+ *
+ * This API allows one to "map" ranges of SCOM register numbers associated
+ * with a given SCOM controller. The latter must be represented by a
+ * device node, though some implementations might support NULL if there
+ * is no possible ambiguity.
+ *
+ * Then, scom_read/scom_write can be used to access registers inside
+ * that range. The argument passed is a register number relative to
+ * the beginning of the range mapped.
+ */
+
+typedef void *scom_map_t;
+
+/* Value for an invalid SCOM map */
+#define SCOM_MAP_INVALID	(NULL)
+
+/* The scom_controller data structure is what the platform passes
+ * to the core code in scom_init; it provides the actual implementation
+ * of all the SCOM functions.
+ */
+struct scom_controller {
+	scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
+	void (*unmap)(scom_map_t map);
+
+	int (*read)(scom_map_t map, u64 reg, u64 *value);
+	int (*write)(scom_map_t map, u64 reg, u64 value);
+};
+
+extern const struct scom_controller *scom_controller;
+
+/**
+ * scom_init - Initialize the SCOM backend, called by the platform
+ * @controller: The platform SCOM controller
+ */
+static inline void scom_init(const struct scom_controller *controller)
+{
+	scom_controller = controller;
+}
+
+/**
+ * scom_map_ok - Test if a SCOM mapping is successful
+ * @map: The result of scom_map to test
+ */
+static inline int scom_map_ok(scom_map_t map)
+{
+	return map != SCOM_MAP_INVALID;
+}
+
+/**
+ * scom_map - Map a block of SCOM registers
+ * @ctrl_dev: Device node of the SCOM controller
+ *            some implementations allow NULL here
+ * @reg: first SCOM register to map
+ * @count: Number of SCOM registers to map
+ */
+
+static inline scom_map_t scom_map(struct device_node *ctrl_dev,
+				  u64 reg, u64 count)
+{
+	return scom_controller->map(ctrl_dev, reg, count);
+}
+
+/**
+ * scom_find_parent - Find the SCOM controller for a device
+ * @dev: OF node of the device
+ *
+ * This is not meant for general usage, but in combination with
+ * scom_map() it allows mapping registers not represented by the
+ * device's own scom-reg property. Useful for applying HW workarounds
+ * on things not properly represented in the device-tree, for example.
+ */
+struct device_node *scom_find_parent(struct device_node *dev);
+
+
+/**
+ * scom_map_device - Map a device's block of SCOM registers
+ * @dev: OF node of the device
+ * @index: Register bank index (index in "scom-reg" property)
+ *
+ * This function will use the device-tree binding for SCOM, which
+ * is to follow "scom-parent" properties until it finds a node with
+ * a "scom-controller" property, in order to find the controller. It will
+ * then use the "scom-reg" property, which is made of reg/count pairs,
+ * each of them having a size defined by the controller's #scom-cells
+ * property.
+ */
+extern scom_map_t scom_map_device(struct device_node *dev, int index);
+
+
+/**
+ * scom_unmap - Unmap a block of SCOM registers
+ * @map: Result of scom_map to be unmapped
+ */
+static inline void scom_unmap(scom_map_t map)
+{
+	if (scom_map_ok(map))
+		scom_controller->unmap(map);
+}
+
+/**
+ * scom_read - Read a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ * @value: Updated with the value read
+ *
+ * Returns 0 (success) or a negative error code
+ */
+static inline int scom_read(scom_map_t map, u64 reg, u64 *value)
+{
+	int rc;
+
+	rc = scom_controller->read(map, reg, value);
+	if (rc)
+		*value = 0xfffffffffffffffful;
+	return rc;
+}
+
+/**
+ * scom_write - Write to a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ * @value: Value to write
+ *
+ * Returns 0 (success) or a negative error code
+ */
+static inline int scom_write(scom_map_t map, u64 reg, u64 value)
+{
+	return scom_controller->write(map, reg, value);
+}
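+
+/*
+ * A minimal usage sketch of the API above (the register number and the
+ * device node "dev" are hypothetical):
+ *
+ *	scom_map_t map = scom_map_device(dev, 0);
+ *	u64 val;
+ *
+ *	if (scom_map_ok(map)) {
+ *		if (scom_read(map, 0x10, &val) == 0)
+ *			scom_write(map, 0x10, val | 1);
+ *		scom_unmap(map);
+ *	}
+ */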
+
+
+#endif /* CONFIG_PPC_SCOM */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SCOM_H */
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
new file mode 100644
index 0000000..51209f6
--- /dev/null
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#include <asm-generic/seccomp.h>
+
+#endif	/* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
new file mode 100644
index 0000000..e335a8f
--- /dev/null
+++ b/arch/powerpc/include/asm/sections.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
+#ifdef __KERNEL__
+
+#include <linux/elf.h>
+#include <linux/uaccess.h>
+#include <asm-generic/sections.h>
+
+extern char __head_end[];
+
+#ifdef __powerpc64__
+
+extern char __start_interrupts[];
+extern char __end_interrupts[];
+
+extern char __prom_init_toc_start[];
+extern char __prom_init_toc_end[];
+
+static inline int in_kernel_text(unsigned long addr)
+{
+	if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
+		return 1;
+
+	return 0;
+}
+
+static inline unsigned long kernel_toc_addr(void)
+{
+	/* Defined by the linker, see vmlinux.lds.S */
+	extern unsigned long __toc_start;
+
+	/*
+	 * The TOC register (r2) points 32kB into the TOC, so that 64kB of
+	 * the TOC can be addressed using a single machine instruction.
+	 */
+	return (unsigned long)(&__toc_start) + 0x8000UL;
+}
+
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+							unsigned long end)
+{
+	unsigned long real_start, real_end;
+	real_start = __start_interrupts - _stext;
+	real_end = __end_interrupts - _stext;
+
+	return start < (unsigned long)__va(real_end) &&
+		(unsigned long)__va(real_start) < end;
+}
+
+static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
+{
+	return start < (unsigned long)__init_end &&
+		(unsigned long)_stext < end;
+}
+
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
+#ifdef PPC64_ELF_ABI_v1
+
+#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
+
+#undef dereference_function_descriptor
+static inline void *dereference_function_descriptor(void *ptr)
+{
+	struct ppc64_opd_entry *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->funcaddr, p))
+		ptr = p;
+	return ptr;
+}
+
+#undef dereference_kernel_function_descriptor
+static inline void *dereference_kernel_function_descriptor(void *ptr)
+{
+	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
+		return ptr;
+
+	return dereference_function_descriptor(ptr);
+}
+#endif /* PPC64_ELF_ABI_v1 */
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_SECTIONS_H */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
new file mode 100644
index 0000000..759597b
--- /dev/null
+++ b/arch/powerpc/include/asm/security_features.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Security related feature bit definitions.
+ *
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
+#define _ASM_POWERPC_SECURITY_FEATURES_H
+
+
+extern unsigned long powerpc_security_features;
+extern bool rfi_flush;
+
+/* These are bit flags */
+enum stf_barrier_type {
+	STF_BARRIER_NONE	= 0x1,
+	STF_BARRIER_FALLBACK	= 0x2,
+	STF_BARRIER_EIEIO	= 0x4,
+	STF_BARRIER_SYNC_ORI	= 0x8,
+};
+
+void setup_stf_barrier(void);
+void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
+
+static inline void security_ftr_set(unsigned long feature)
+{
+	powerpc_security_features |= feature;
+}
+
+static inline void security_ftr_clear(unsigned long feature)
+{
+	powerpc_security_features &= ~feature;
+}
+
+static inline bool security_ftr_enabled(unsigned long feature)
+{
+	return !!(powerpc_security_features & feature);
+}
+
+
+// Features indicating support for Spectre/Meltdown mitigations
+
+// The L1-D cache can be flushed with ori r30,r30,0
+#define SEC_FTR_L1D_FLUSH_ORI30		0x0000000000000001ull
+
+// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
+#define SEC_FTR_L1D_FLUSH_TRIG2		0x0000000000000002ull
+
+// ori r31,r31,0 acts as a speculation barrier
+#define SEC_FTR_SPEC_BAR_ORI31		0x0000000000000004ull
+
+// Speculation past bctr is disabled
+#define SEC_FTR_BCCTRL_SERIALISED	0x0000000000000008ull
+
+// Entries in L1-D are private to an SMT thread
+#define SEC_FTR_L1D_THREAD_PRIV		0x0000000000000010ull
+
+// Indirect branch prediction cache disabled
+#define SEC_FTR_COUNT_CACHE_DISABLED	0x0000000000000020ull
+
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST	0x0000000000000800ull
+
+
+// Features indicating need for Spectre/Meltdown mitigations
+
+// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
+#define SEC_FTR_L1D_FLUSH_HV		0x0000000000000040ull
+
+// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
+#define SEC_FTR_L1D_FLUSH_PR		0x0000000000000080ull
+
+// A speculation barrier should be used for bounds checks (Spectre variant 1)
+#define SEC_FTR_BNDS_CHK_SPEC_BAR	0x0000000000000100ull
+
+// Firmware configuration indicates user favours security over performance
+#define SEC_FTR_FAVOUR_SECURITY		0x0000000000000200ull
+
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE	0x0000000000000400ull
+
+
+// Features enabled by default
+#define SEC_FTR_DEFAULT \
+	(SEC_FTR_L1D_FLUSH_HV | \
+	 SEC_FTR_L1D_FLUSH_PR | \
+	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
+	 SEC_FTR_FAVOUR_SECURITY)
+
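+// A minimal usage sketch (the mitigation wiring shown is illustrative;
+// setup_rfi_flush() and L1D_FLUSH_FALLBACK come from asm/setup.h):
+//
+//	security_ftr_set(SEC_FTR_DEFAULT);
+//	if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+//	    security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
+//		setup_rfi_flush(L1D_FLUSH_FALLBACK, true);
+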
+#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/serial.h b/arch/powerpc/include/asm/serial.h
new file mode 100644
index 0000000..3e8589b
--- /dev/null
+++ b/arch/powerpc/include/asm/serial.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
+
+/* Default baud base if not found in device-tree */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#ifdef CONFIG_PPC_UDBG_16550
+extern void find_legacy_serial_ports(void);
+#else
+#define find_legacy_serial_ports()	do { } while (0)
+#endif
+
+#endif /* _ASM_POWERPC_SERIAL_H */
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
new file mode 100644
index 0000000..279d03a
--- /dev/null
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright © 2008 Michael Neuling IBM Corporation
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _ASM_POWERPC_SETJMP_H
+#define _ASM_POWERPC_SETJMP_H
+
+#define JMP_BUF_LEN    23
+
+extern long setjmp(long *);
+extern void longjmp(long *, long);
+
+#endif /* _ASM_POWERPC_SETJMP_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
new file mode 100644
index 0000000..1fffbba
--- /dev/null
+++ b/arch/powerpc/include/asm/setup.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SETUP_H
+#define _ASM_POWERPC_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+
+extern unsigned int rtas_data;
+extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
+extern unsigned long klimit;
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+void check_for_initrd(void);
+void mem_topology_setup(void);
+void initmem_init(void);
+void setup_panic(void);
+#define ARCH_PANIC_TIMEOUT 180
+
+#ifdef CONFIG_PPC_PSERIES
+extern void pseries_enable_reloc_on_exc(void);
+extern void pseries_disable_reloc_on_exc(void);
+extern void pseries_big_endian_exceptions(void);
+extern void pseries_little_endian_exceptions(void);
+#else
+static inline void pseries_enable_reloc_on_exc(void) {}
+static inline void pseries_disable_reloc_on_exc(void) {}
+static inline void pseries_big_endian_exceptions(void) {}
+static inline void pseries_little_endian_exceptions(void) {}
+#endif /* CONFIG_PPC_PSERIES */
+
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+	L1D_FLUSH_NONE		= 0x1,
+	L1D_FLUSH_FALLBACK	= 0x2,
+	L1D_FLUSH_ORI		= 0x4,
+	L1D_FLUSH_MTTRIG	= 0x8,
+};
+
+void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { }
+#endif
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif	/* _ASM_POWERPC_SETUP_H */
+
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
new file mode 100644
index 0000000..d89beab
--- /dev/null
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -0,0 +1,377 @@
+/* Machine-dependent software floating-point definitions.  PPC version.
+   Copyright (C) 1997 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, write to the Free Software Foundation, Inc.,
+   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+   Actually, this is a PPC (32bit) version, written based on the
+   i386, sparc, and sparc64 versions, by me,
+   Peter Maydell (pmaydell@chiark.greenend.org.uk).
+   Comments are by and large also mine, although they may be inaccurate.
+
+   In picking out asm fragments I've gone with the lowest common
+   denominator, which also happens to be the hardware I have :->
+   That is, a SPARC without hardware multiply and divide.
+ */
+
+/* basic word size definitions */
+#define _FP_W_TYPE_SIZE		32
+#define _FP_W_TYPE		unsigned int
+#define _FP_WS_TYPE		signed int
+#define _FP_I_TYPE		int
+
+#define __ll_B			((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t)		((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t)	((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* You can optionally code some things like addition in asm. For
+ * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't
+ * then you get a fragment of C code [if you change an #ifdef 0
+ * in op-2.h] or a call to add_ssaaaa (see below).
+ * Good places to look for asm fragments to use are gcc and glibc.
+ * gcc's longlong.h is useful.
+ */
+
+/* We need to know how to multiply and divide. If the host word size
+ * is >= 2*fracbits you can use FP_MUL_MEAT_n_imm(t,R,X,Y) which
+ * codes the multiply with whatever gcc does to 'a * b'.
+ * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm
+ * function that can multiply two 1W values and get a 2W result.
+ * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which
+ * does bitshifting to avoid overflow.
+ * For division there is FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size
+ * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or
+ * _FP_DIV_HELP_ldiv (see op-1.h).
+ * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W).
+ * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd
+ * to do this.]
+ * In general, 'n' is the number of words required to hold the type,
+ * and 't' is either S, D or Q for single/double/quad.
+ *           -- PMM
+ */
+/* Example: SPARC64:
+ * #define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_imm(S,R,X,Y)
+ * #define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
+ * #define _FP_MUL_MEAT_Q(R,X,Y)	_FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+ * #define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv(D,R,X,Y)
+ * #define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv_64(Q,R,X,Y)
+ *
+ * Example: i386:
+ * #define _FP_MUL_MEAT_S(R,X,Y)   _FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64)
+ * #define _FP_MUL_MEAT_D(R,X,Y)   _FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32)
+ * #define _FP_DIV_MEAT_D(R,X,Y)   _FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
+ */
+
+#define _FP_MUL_MEAT_S(R,X,Y)   _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y)   _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+/* These macros define what NaN looks like. They're supposed to expand to
+ * a comma-separated set of 32bit unsigned ints that encode NaN.
+ */
+#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S		0
+#define _FP_NANSIGN_D		0
+#define _FP_NANSIGN_Q		0
+
+#define _FP_KEEPNANFRACP 1
+
+#ifdef FP_EX_BOOKE_E500_SPE
+#define FP_EX_INEXACT		(1 << 21)
+#define FP_EX_INVALID		(1 << 20)
+#define FP_EX_DIVZERO		(1 << 19)
+#define FP_EX_UNDERFLOW		(1 << 18)
+#define FP_EX_OVERFLOW		(1 << 17)
+#define FP_INHIBIT_RESULTS	0
+
+#define __FPU_FPSCR	(current->thread.spefscr)
+#define __FPU_ENABLED_EXC		\
+({					\
+	(__FPU_FPSCR >> 2) & 0x1f;	\
+})
+#else
+/* Exception flags.  We use the bit positions of the appropriate bits
+   in the FPSCR, which also correspond to the FE_* bits.  This makes
+   everything easier ;-).  */
+#define FP_EX_INVALID         (1 << (31 - 2))
+#define FP_EX_INVALID_SNAN	EFLAG_VXSNAN
+#define FP_EX_INVALID_ISI	EFLAG_VXISI
+#define FP_EX_INVALID_IDI	EFLAG_VXIDI
+#define FP_EX_INVALID_ZDZ	EFLAG_VXZDZ
+#define FP_EX_INVALID_IMZ	EFLAG_VXIMZ
+#define FP_EX_OVERFLOW        (1 << (31 - 3))
+#define FP_EX_UNDERFLOW       (1 << (31 - 4))
+#define FP_EX_DIVZERO         (1 << (31 - 5))
+#define FP_EX_INEXACT         (1 << (31 - 6))
+
+#define __FPU_FPSCR	(current->thread.fp_state.fpscr)
+
+/* We only actually write to the destination register
+ * if exceptions signalled (if any) will not trap.
+ */
+#define __FPU_ENABLED_EXC \
+({						\
+	(__FPU_FPSCR >> 3) & 0x1f;	\
+})
+
+#endif
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose X.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+  do {								\
+    if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)		\
+	&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs))	\
+      {								\
+	R##_s = X##_s;						\
+	_FP_FRAC_COPY_##wc(R,X);				\
+      }								\
+    else							\
+      {								\
+	R##_s = Y##_s;						\
+	_FP_FRAC_COPY_##wc(R,Y);				\
+      }								\
+    R##_c = FP_CLS_NAN;						\
+  } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#define __FPU_TRAP_P(bits) \
+	((__FPU_ENABLED_EXC & (bits)) != 0)
+
+#define __FP_PACK_S(val,X)			\
+({  int __exc = _FP_PACK_CANONICAL(S,1,X);	\
+    if(!__exc || !__FPU_TRAP_P(__exc))		\
+        _FP_PACK_RAW_1_P(S,val,X);		\
+    __exc;					\
+})
+
+#define __FP_PACK_D(val,X)			\
+   do {									\
+	_FP_PACK_CANONICAL(D, 2, X);					\
+	if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS))	\
+		_FP_PACK_RAW_2_P(D, val, X);				\
+   } while (0)
+
+#define __FP_PACK_DS(val,X)							\
+   do {										\
+	   FP_DECL_S(__X);							\
+	   FP_CONV(S, D, 1, 2, __X, X);						\
+	   _FP_PACK_CANONICAL(S, 1, __X);					\
+	   if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) {	\
+		   _FP_UNPACK_CANONICAL(S, 1, __X);				\
+		   FP_CONV(D, S, 2, 1, X, __X);					\
+		   _FP_PACK_CANONICAL(D, 2, X);					\
+		   if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS))	\
+		   _FP_PACK_RAW_2_P(D, val, X);					\
+	   }									\
+   } while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE			\
+({					\
+	__FPU_FPSCR & 0x3;		\
+})
+
+/* the asm fragments go here: all these are taken from glibc-2.0.5's
+ * stdlib/longlong.h
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* add_ssaaaa is used in op-2.h and should be equivalent to
+ * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
+ * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ * high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ * respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
+ * (i.e. carry out) is not stored anywhere, and is lost.
+ */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
+  do {									\
+    if (__builtin_constant_p (bh) && (bh) == 0)				\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+    else								\
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "r" ((USItype)(bh)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+  } while (0)
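+
+/* A worked instance (illustrative): add_ssaaaa(sh, sl, 0, 0xffffffffU, 0, 1)
+ * yields sh = 1, sl = 0, i.e. 0x00000000ffffffff + 1 = 0x0000000100000000.
+ */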
+
+/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
+ * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
+ * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ * LOW_SUBTRAHEND_2 respectively.  The result is placed in HIGH_DIFFERENCE
+ * and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
+ * and is lost.
+ */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
+  do {									\
+    if (__builtin_constant_p (ah) && (ah) == 0)				\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) == 0)			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"		\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"		\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else								\
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+  } while (0)
+
+/* asm fragments for mul and div */
+
+/* umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+ * word product in HIGH_PROD and LOW_PROD.
+ */
+#define umul_ppmm(ph, pl, m0, m1)					\
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhwu %0,%1,%2"						\
+	     : "=r" ((USItype)(ph))					\
+	     : "%r" (__m0),						\
+               "r" (__m1));						\
+    (pl) = __m0 * __m1;							\
+  } while (0)
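+
+/* A worked instance (illustrative): umul_ppmm(ph, pl, 0x80000000U, 4)
+ * yields ph = 2, pl = 0, i.e. the 64-bit product 0x200000000.
+ */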
+
+/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator) divides a UDWtype, composed by the UWtype integers
+ * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ * in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
+ * than DENOMINATOR for correct operation.  If, in addition, the macro
+ * requires the most significant bit of DENOMINATOR to be 1, then the
+ * pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
+ */
+#define udiv_qrnnd(q, r, n1, n0, d)					\
+  do {									\
+    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;			\
+    __d1 = __ll_highpart (d);						\
+    __d0 = __ll_lowpart (d);						\
+									\
+    __r1 = (n1) % __d1;							\
+    __q1 = (n1) / __d1;							\
+    __m = (UWtype) __q1 * __d0;						\
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
+    if (__r1 < __m)							\
+      {									\
+	__q1--, __r1 += (d);						\
+	if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */	\
+	  if (__r1 < __m)						\
+	    __q1--, __r1 += (d);					\
+      }									\
+    __r1 -= __m;							\
+									\
+    __r0 = __r1 % __d1;							\
+    __q0 = __r1 / __d1;							\
+    __m = (UWtype) __q0 * __d0;						\
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
+    if (__r0 < __m)							\
+      {									\
+	__q0--, __r0 += (d);						\
+	if (__r0 >= (d))						\
+	  if (__r0 < __m)						\
+	    __q0--, __r0 += (d);					\
+      }									\
+    __r0 -= __m;							\
+									\
+    (q) = (UWtype) __q1 * __ll_B | __q0;				\
+    (r) = __r0;								\
+  } while (0)
+
+#define UDIV_NEEDS_NORMALIZATION 1
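+
+/* A worked instance (illustrative, with d normalized as required):
+ * udiv_qrnnd(q, r, 1, 0, 0x80000000U) yields q = 2, r = 0,
+ * since 0x100000000 / 0x80000000 = 2 remainder 0.
+ */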
+
+#define abort()								\
+	return 0
+
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+/* Exception flags. */
+#define EFLAG_INVALID		(1 << (31 - 2))
+#define EFLAG_OVERFLOW		(1 << (31 - 3))
+#define EFLAG_UNDERFLOW		(1 << (31 - 4))
+#define EFLAG_DIVZERO		(1 << (31 - 5))
+#define EFLAG_INEXACT		(1 << (31 - 6))
+
+#define EFLAG_VXSNAN		(1 << (31 - 7))
+#define EFLAG_VXISI		(1 << (31 - 8))
+#define EFLAG_VXIDI		(1 << (31 - 9))
+#define EFLAG_VXZDZ		(1 << (31 - 10))
+#define EFLAG_VXIMZ		(1 << (31 - 11))
+#define EFLAG_VXVC		(1 << (31 - 12))
+#define EFLAG_VXSOFT		(1 << (31 - 21))
+#define EFLAG_VXSQRT		(1 << (31 - 22))
+#define EFLAG_VXCVI		(1 << (31 - 23))
diff --git a/arch/powerpc/include/asm/shmparam.h b/arch/powerpc/include/asm/shmparam.h
new file mode 100644
index 0000000..bc09688
--- /dev/null
+++ b/arch/powerpc/include/asm/shmparam.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SHMPARAM_H
+#define _ASM_POWERPC_SHMPARAM_H
+
+#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */
+
+#endif	/* _ASM_POWERPC_SHMPARAM_H */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
new file mode 100644
index 0000000..0803ca8
--- /dev/null
+++ b/arch/powerpc/include/asm/signal.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SIGNAL_H
+#define _ASM_POWERPC_SIGNAL_H
+
+#define __ARCH_HAS_SA_RESTORER
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
new file mode 100644
index 0000000..e40406c
--- /dev/null
+++ b/arch/powerpc/include/asm/slice.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SLICE_H
+#define _ASM_POWERPC_SLICE_H
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/slice.h>
+#elif defined(CONFIG_PPC64)
+#include <asm/nohash/64/slice.h>
+#elif defined(CONFIG_PPC_MMU_NOHASH)
+#include <asm/nohash/32/slice.h>
+#endif
+
+#ifdef CONFIG_PPC_MM_SLICES
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+				      unsigned long flags, unsigned int psize,
+				      int topdown);
+
+unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
+
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+			   unsigned long len, unsigned int psize);
+
+void slice_init_new_context_exec(struct mm_struct *mm);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#endif /* _ASM_POWERPC_SLICE_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
new file mode 100644
index 0000000..95b66a0
--- /dev/null
+++ b/arch/powerpc/include/asm/smp.h
@@ -0,0 +1,234 @@
+/* 
+ * smp.h: PowerPC-specific SMP code.
+ *
+ * Original was a copy of sparc smp.h.  Now heavily modified
+ * for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
+#ifdef __KERNEL__
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+#include <asm/percpu.h>
+
+extern int boot_cpuid;
+extern int spinning_secondaries;
+extern u32 *cpu_to_phys_id;
+
+extern void cpu_die(void);
+extern int cpu_to_chip_id(int cpu);
+
+#ifdef CONFIG_SMP
+
+struct smp_ops_t {
+	void  (*message_pass)(int cpu, int msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+	void  (*cause_ipi)(int cpu);
+#endif
+	int   (*cause_nmi_ipi)(int cpu);
+	void  (*probe)(void);
+	int   (*kick_cpu)(int nr);
+	int   (*prepare_cpu)(int nr);
+	void  (*setup_cpu)(int nr);
+	void  (*bringup_done)(void);
+	void  (*take_timebase)(void);
+	void  (*give_timebase)(void);
+	int   (*cpu_disable)(void);
+	void  (*cpu_die)(unsigned int nr);
+	int   (*cpu_bootable)(unsigned int nr);
+};
+
+extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern void smp_send_debugger_break(void);
+extern void start_secondary_resume(void);
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int generic_cpu_disable(void);
+void generic_cpu_die(unsigned int cpu);
+void generic_set_cpu_dead(unsigned int cpu);
+void generic_set_cpu_up(unsigned int cpu);
+int generic_check_cpu_restart(unsigned int cpu);
+int is_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i)	do { } while (0)
+#endif
+
+#ifdef CONFIG_PPC64
+#define raw_smp_processor_id()	(local_paca->paca_index)
+#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
+#define hard_smp_processor_id() 	(smp_hw_index[smp_processor_id()])
+
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return smp_hw_index[cpu];
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	smp_hw_index[cpu] = phys;
+}
+#endif
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+	return per_cpu(cpu_core_map, cpu);
+}
+
+static inline struct cpumask *cpu_l2_cache_mask(int cpu)
+{
+	return per_cpu(cpu_l2_cache_map, cpu);
+}
+
+extern int cpu_to_core_id(int cpu);
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION	0
+#define PPC_MSG_RESCHEDULE	1
+#define PPC_MSG_TICK_BROADCAST	2
+#define PPC_MSG_NMI_IPI		3
+
+/* This is only used by the powernv kernel */
+#define PPC_MSG_RM_HOST_ACTION	4
+
+#define NMI_IPI_ALL_OTHERS		-2
+
+#ifdef CONFIG_NMI_IPI
+extern int smp_handle_nmi_ipi(struct pt_regs *regs);
+#else
+static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
+#endif
+
+/* for irq controllers that have dedicated ipis per message (4) */
+extern int smp_request_message_ipi(int virq, int message);
+extern const char *smp_ipi_name[];
+
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_set_message(int cpu, int msg);
+extern irqreturn_t smp_ipi_demux(void);
+extern irqreturn_t smp_ipi_demux_relaxed(void);
+
+void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define hard_smp_processor_id()		get_hard_smp_processor_id(0)
+#define smp_setup_cpu_maps()
+static inline void inhibit_secondary_onlining(void) {}
+static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return paca_ptrs[cpu]->hw_cpu_id;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	paca_ptrs[cpu]->hw_cpu_id = phys;
+}
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+extern int boot_cpuid_phys;
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return boot_cpuid_phys;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	boot_cpuid_phys = phys;
+}
+#endif /* !CONFIG_SMP */
+#endif /* !CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
+extern void smp_release_cpus(void);
+#else
+static inline void smp_release_cpus(void) { }
+#endif
+
+extern int smt_enabled_at_boot;
+
+extern void smp_mpic_probe(void);
+extern void smp_mpic_setup_cpu(int cpu);
+extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
+
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+extern struct smp_ops_t *smp_ops;
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/* Definitions relative to the secondary CPU spin loop
+ * and entry point. Not all of them exist on both 32 and
+ * 64-bit but defining them all here doesn't harm
+ */
+extern void generic_secondary_smp_init(void);
+extern void generic_secondary_thread_init(void);
+extern unsigned long __secondary_hold_spinloop;
+extern unsigned long __secondary_hold_acknowledge;
+extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
+
+extern void __early_start(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
new file mode 100644
index 0000000..8dff086
--- /dev/null
+++ b/arch/powerpc/include/asm/smu.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SMU_H
+#define _SMU_H
+
+/*
+ * Definitions for talking to the SMU chip in newer G5 PowerMacs
+ */
+#ifdef __KERNEL__
+#include <linux/list.h>
+#endif
+#include <linux/types.h>
+
+/*
+ * Known SMU commands
+ *
+ * Most of what is below comes from looking at the Open Firmware driver,
+ * though this is still incomplete and could use better documentation here
+ * or there...
+ */
+
+
+/*
+ * Partition info commands
+ *
+ * These commands are used to retrieve the sdb-partition-XX data from
+ * the SMU. The length is always 2. First byte is the subcommand code
+ * and second byte is the partition ID.
+ *
+ * The reply is 6 bytes:
+ *
+ *  - 0..1 : partition address
+ *  - 2    : a byte containing the partition ID
+ *  - 3    : length (maybe other bits are rest of header ?)
+ *
+ * The data must then be obtained with calls to another command:
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC (described below).
+ */
+#define SMU_CMD_PARTITION_COMMAND		0x3e
+#define   SMU_CMD_PARTITION_LATEST		0x01
+#define   SMU_CMD_PARTITION_BASE		0x02
+#define   SMU_CMD_PARTITION_UPDATE		0x03
+
+
+/*
+ * Fan control
+ *
+ * This is a "mux" for fan control commands. The command seems to
+ * act differently based on the number of arguments. With 1 byte
+ * of argument, this seems to be a query for fan status, setpoint,
+ * etc..., while with 0xe arguments, we will set the fan speeds.
+ *
+ * Queries (1 byte arg):
+ * ---------------------
+ *
+ * arg=0x01: read RPM fans status
+ * arg=0x02: read RPM fans setpoint
+ * arg=0x11: read PWM fans status
+ * arg=0x12: read PWM fans setpoint
+ *
+ * The "status" queries return the current speed while the "setpoint" ones
+ * return the programmed/target speed. It _seems_ that the result is a bit
+ * mask in the first byte of active/available fans, followed by 6 words (16
+ * bits) containing the requested speed.
+ *
+ * Setpoint (14 bytes arg):
+ * ------------------------
+ *
+ * The first arg byte is 0 for RPM fans and 0x10 for PWM. The second arg byte
+ * is the mask of fans affected by the command, followed by 6 words containing
+ * the setpoint value for the selected fans in the mask (or 0 if the mask
+ * value is 0).
+ */
+#define SMU_CMD_FAN_COMMAND			0x4a
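+
+/*
+ * For instance (a sketch using the smu_queue_simple() helper declared
+ * later in this file), querying the RPM fans status:
+ *
+ *	smu_queue_simple(&scmd, SMU_CMD_FAN_COMMAND, 1, done, NULL, 0x01);
+ */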
+
+
+/*
+ * Battery access
+ *
+ * Same command number as the PMU, could it be the same syntax?
+ */
+#define SMU_CMD_BATTERY_COMMAND			0x6f
+#define   SMU_CMD_GET_BATTERY_INFO		0x00
+
+/*
+ * Real time clock control
+ *
+ * This is a "mux", first data byte contains the "sub" command.
+ * The "RTC" part of the SMU controls the date, time, powerup
+ * timer, but also a PRAM
+ *
+ * Dates are in BCD format on 7 bytes:
+ * [sec] [min] [hour] [weekday] [month day] [month] [year]
+ * with month being 1 based and year minus 100
+ */
+#define SMU_CMD_RTC_COMMAND			0x8e
+#define   SMU_CMD_RTC_SET_PWRUP_TIMER		0x00 /* i: 7 bytes date */
+#define   SMU_CMD_RTC_GET_PWRUP_TIMER		0x01 /* o: 7 bytes date */
+#define   SMU_CMD_RTC_STOP_PWRUP_TIMER		0x02
+#define   SMU_CMD_RTC_SET_PRAM_BYTE_ACC		0x20 /* i: 1 byte (address?) */
+#define   SMU_CMD_RTC_SET_PRAM_AUTOINC		0x21 /* i: 1 byte (data?) */
+#define   SMU_CMD_RTC_SET_PRAM_LO_BYTES 	0x22 /* i: 10 bytes */
+#define   SMU_CMD_RTC_SET_PRAM_HI_BYTES 	0x23 /* i: 10 bytes */
+#define   SMU_CMD_RTC_GET_PRAM_BYTE		0x28 /* i: 1 bytes (address?) */
+#define   SMU_CMD_RTC_GET_PRAM_LO_BYTES 	0x29 /* o: 10 bytes */
+#define   SMU_CMD_RTC_GET_PRAM_HI_BYTES 	0x2a /* o: 10 bytes */
+#define	  SMU_CMD_RTC_SET_DATETIME		0x80 /* i: 7 bytes date */
+#define   SMU_CMD_RTC_GET_DATETIME		0x81 /* o: 7 bytes date */
+
+ /*
+  * i2c commands
+  *
+  * To issue an i2c command, first send a parameter block to the
+  * SMU. This is a command of type 0x9a with 9 bytes of header,
+  * optionally followed by data for a write:
+  *
+  * 0: bus number (from device-tree usually, SMU has lots of buses!)
+  * 1: transfer type/format (see below)
+  * 2: device address. For combined and combined4 type transfers, this
+  *    is the "write" version of the address (bit 0x01 cleared)
+  * 3: subaddress length (0..3)
+  * 4: subaddress byte 0 (or only byte for subaddress length 1)
+  * 5: subaddress byte 1
+  * 6: subaddress byte 2
+  * 7: combined address (device address for combined mode data phase)
+  * 8: data length
+  *
+  * The transfer types are the same good old Apple ones it seems,
+  * that is:
+  *   - 0x00: Simple transfer
+  *   - 0x01: Subaddress transfer (addr write + data tx, no restart)
+  *   - 0x02: Combined transfer (addr write + restart + data tx)
+  *
+  * This is then followed by actual data for a write.
+  *
+  * At this point, the OF driver seems to have a limitation on transfer
+  * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
+  * whether this is just an OF limit due to some temporary buffer size
+  * or if this is an SMU imposed limit. This driver has the same limitation
+  * for now, as I use a 0x10 byte temporary buffer as well.
+  *
+  * Once that is completed, a response is expected from the SMU. This is
+  * obtained via a command of type 0x9a with a length of 1 byte containing
+  * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's
+  * though I can't tell yet if this is actually necessary. Once this command
+  * is complete, all I can tell is what OF does. OF tests
+  * byte 0 of the reply:
+  *   - on read, 0xfe or 0xfc : bus is busy, wait (see below) or nak ?
+  *   - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0)
+  *   - on write, < 0 -> failure (immediate exit)
+  *   - else, OF just exits (without error, weird)
+  *
+  * So on read, there is this wait-for-busy thing when getting a 0xfc or
+  * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and
+  * doing the above again until either the retries expire or the result
+  * is no longer 0xfe or 0xfc
+  *
+  * The Darwin I2C driver is less subtle though. On any non-success status
+  * from the response command, it waits 5ms and tries again up to 20 times;
+  * it doesn't differentiate between fatal errors and "busy" status.
+  *
+  * This driver provides an asynchronous paramblock based i2c command
+  * interface to be used either directly by low level code or by a higher
+  * level driver interfacing to the linux i2c layer. The current
+  * implementation of this relies on working timers & timer interrupts
+  * though, so be careful of calling context for now. This may be "fixed"
+  * in the future by adding a polling facility.
+  */
+#define SMU_CMD_I2C_COMMAND			0x9a
+          /* transfer types */
+#define   SMU_I2C_TRANSFER_SIMPLE	0x00
+#define   SMU_I2C_TRANSFER_STDSUB	0x01
+#define   SMU_I2C_TRANSFER_COMBINED	0x02
+
+/*
+ * Power supply control
+ *
+ * The "sub" command is an ASCII string in the data; the
+ * data length is that of the string.
+ *
+ * The VSLEW command can be used to get or set the voltage slewing.
+ *  - length 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
+ *    reply at data offset 6, 7 and 8.
+ *  - length 8 ("VSLEWxyz") has 3 additional bytes appended, and is
+ *    used to set the voltage slewing point. The SMU replies with "DONE"
+ * I have yet to figure out the exact meaning of those 3 bytes in
+ * both cases. They seem to be:
+ *  x = processor mask
+ *  y = op. point index
+ *  z = processor freq. step index
+ * I haven't yet deciphered the result codes.
+ *
+ */
+#define SMU_CMD_POWER_COMMAND			0xaa
+#define   SMU_CMD_POWER_RESTART		       	"RESTART"
+#define   SMU_CMD_POWER_SHUTDOWN		"SHUTDOWN"
+#define   SMU_CMD_POWER_VOLTAGE_SLEW		"VSLEW"
+
+/*
+ * Read ADC sensors
+ *
+ * This command takes one byte of parameter: the sensor ID (or "reg"
+ * value in the device-tree) and returns a 16 bit value.
+ */
+#define SMU_CMD_READ_ADC			0xd8
+
+
+/* Misc commands
+ *
+ * This command seems to be a grab bag of various things.
+ *
+ * Parameters:
+ *   1: subcommand
+ */
+#define SMU_CMD_MISC_df_COMMAND			0xdf
+
+/*
+ * Sets "system ready" status
+ *
+ * I do not yet understand exactly how it works or what it does.
+ *
+ * Guessing from OF code, 0x02 activates the display backlight. Apple uses/used
+ * the same codebase for all OF versions. On PowerBooks, this command would
+ * enable the backlight. For the G5s, it only activates the front LED. However,
+ * don't take this for granted.
+ *
+ * Parameters:
+ *   2: status [0x00, 0x01 or 0x02]
+ */
+#define   SMU_CMD_MISC_df_SET_DISPLAY_LIT	0x02
+
+/*
+ * Sets mode of power switch.
+ *
+ * What this actually does is not yet known. Maybe it enables some interrupt.
+ *
+ * Parameters:
+ *   2: enable power switch? [0x00 or 0x01]
+ *   3 (optional): enable nmi? [0x00 or 0x01]
+ *
+ * Returns:
+ *   If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
+ *   NMI is enabled. Otherwise unknown.
+ */
+#define   SMU_CMD_MISC_df_NMI_OPTION		0x04
+
+/* Sets LED dim offset.
+ *
+ * The front LED dims itself during sleep. Its brightness (or, well, the PWM
+ * frequency) depends on current time. Therefore, the SMU needs to know the
+ * timezone.
+ *
+ * Parameters:
+ *   2-8: unknown (BCD coding)
+ */
+#define   SMU_CMD_MISC_df_DIMM_OFFSET		0x99
+
+
+/*
+ * Version info commands
+ *
+ * Parameters:
+ *   1 (optional): Specifies version part to retrieve
+ *
+ * Returns:
+ *   Version value
+ */
+#define SMU_CMD_VERSION_COMMAND			0xea
+#define   SMU_VERSION_RUNNING			0x00
+#define   SMU_VERSION_BASE			0x01
+#define   SMU_VERSION_UPDATE			0x02
+
+
+/*
+ * Switches
+ *
+ * These are switches whose status seems to be known to the SMU.
+ *
+ * Parameters:
+ *   none
+ *
+ * Result:
+ *   Switch bits (ORed, see below)
+ */
+#define SMU_CMD_SWITCHES			0xdc
+
+/* Switches bits */
+#define SMU_SWITCH_CASE_CLOSED			0x01
+#define SMU_SWITCH_AC_POWER			0x04
+#define SMU_SWITCH_POWER_SWITCH			0x08
+
+
+/*
+ * Misc commands
+ *
+ * This command seems to be a grab bag of various things.
+ *
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC is used, among others, to
+ * transfer blocks of data from the SMU. So far, I've decrypted its
+ * usage to retrieve partition data. In order to do that, you have to
+ * break your transfer in "chunks" since that command cannot transfer
+ * more than a chunk at a time. The chunk size used by OF is 0xe bytes,
+ * but it seems that the darwin driver will let you do 0x1e bytes if
+ * your "PMU" version is >= 0x30. You can get the "PMU" version apparently
+ * either in the last 16 bits of property "smu-version-pmu" or as the 16
+ * bytes at offset 1 of "smu-version-info"
+ *
+ * For each chunk, the command takes 7 bytes of arguments:
+ *  byte 0: subcommand code (0x02)
+ *  byte 1: 0x04 (always, I don't know what it means, maybe the address
+ *                space to use or some other nicety. It's hard coded in OF)
+ *  byte 2..5: SMU address of the chunk (big endian 32 bits)
+ *  byte 6: size to transfer (up to max chunk size)
+ *
+ * The data is returned directly
+ */
+#define SMU_CMD_MISC_ee_COMMAND			0xee
+#define   SMU_CMD_MISC_ee_GET_DATABLOCK_REC	0x02
+
+/* Retrieves currently used watts.
+ *
+ * Parameters:
+ *   1: 0x03 (Meaning unknown)
+ */
+#define   SMU_CMD_MISC_ee_GET_WATTS		0x03
+
+#define   SMU_CMD_MISC_ee_LEDS_CTRL		0x04 /* i: 00 (00,01) [00] */
+#define   SMU_CMD_MISC_ee_GET_DATA		0x05 /* i: 00 , o: ?? */
+
+
+/*
+ * Power related commands
+ *
+ * Parameters:
+ *   1: subcommand
+ */
+#define SMU_CMD_POWER_EVENTS_COMMAND		0x8f
+
+/* SMU_POWER_EVENTS subcommands */
+enum {
+	SMU_PWR_GET_POWERUP_EVENTS      = 0x00,
+	SMU_PWR_SET_POWERUP_EVENTS      = 0x01,
+	SMU_PWR_CLR_POWERUP_EVENTS      = 0x02,
+	SMU_PWR_GET_WAKEUP_EVENTS       = 0x03,
+	SMU_PWR_SET_WAKEUP_EVENTS       = 0x04,
+	SMU_PWR_CLR_WAKEUP_EVENTS       = 0x05,
+
+	/*
+	 * Get last shutdown cause
+	 *
+	 * Returns:
+	 *   1 byte (signed char): Last shutdown cause. Exact meaning unknown.
+	 */
+	SMU_PWR_LAST_SHUTDOWN_CAUSE	= 0x07,
+
+	/*
+	 * Sets or gets server ID. Meaning or use is unknown.
+	 *
+	 * Parameters:
+	 *   2 (optional): Set server ID (1 byte)
+	 *
+	 * Returns:
+	 *   1 byte (server ID?)
+	 */
+	SMU_PWR_SERVER_ID		= 0x08,
+};
+
+/* Power events wakeup bits */
+enum {
+	SMU_PWR_WAKEUP_KEY              = 0x01, /* Wake on key press */
+	SMU_PWR_WAKEUP_AC_INSERT        = 0x02, /* Wake on AC adapter plug */
+	SMU_PWR_WAKEUP_AC_CHANGE        = 0x04,
+	SMU_PWR_WAKEUP_LID_OPEN         = 0x08,
+	SMU_PWR_WAKEUP_RING             = 0x10,
+};
+
+
+/*
+ * - Kernel side interface -
+ */
+
+#ifdef __KERNEL__
+
+/*
+ * Asynchronous SMU commands
+ *
+ * Fill up this structure and submit it via smu_queue_cmd(),
+ * and get notified by the optional done() callback, or because
+ * status becomes != 1
+ */
+
+struct smu_cmd;
+
+struct smu_cmd
+{
+	/* public */
+	u8			cmd;		/* command */
+	int			data_len;	/* data len */
+	int			reply_len;	/* reply len */
+	void			*data_buf;	/* data buffer */
+	void			*reply_buf;	/* reply buffer */
+	int			status;		/* command status */
+	void			(*done)(struct smu_cmd *cmd, void *misc);
+	void			*misc;
+
+	/* private */
+	struct list_head	link;
+};
+
+/*
+ * Queues an SMU command; all fields have to be initialized.
+ */
+extern int smu_queue_cmd(struct smu_cmd *cmd);
+
+/*
+ * Simple command wrapper. This structure embeds a small buffer
+ * to ease sending simple SMU commands from the stack
+ */
+struct smu_simple_cmd
+{
+	struct smu_cmd	cmd;
+	u8	       	buffer[16];
+};
+
+/*
+ * Queues a simple command. All fields will be initialized by that
+ * function.
+ */
+extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
+			    unsigned int data_len,
+			    void (*done)(struct smu_cmd *cmd, void *misc),
+			    void *misc,
+			    ...);
+
+/*
+ * Completion helper. Pass it to smu_queue_simple or as the 'done'
+ * member to smu_queue_cmd; it will call complete() on the struct
+ * completion passed in the "misc" argument.
+ */
+extern void smu_done_complete(struct smu_cmd *cmd, void *misc);
+
+/*
+ * Synchronous helpers. Will spin-wait for completion of a command
+ */
+extern void smu_spinwait_cmd(struct smu_cmd *cmd);
+
+static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd)
+{
+	smu_spinwait_cmd(&scmd->cmd);
+}
+
+/*
+ * Poll routine to call if blocked with irqs off
+ */
+extern void smu_poll(void);
+
+
+/*
+ * Init routine, presence check....
+ */
+extern int smu_init(void);
+extern int smu_present(void);
+struct platform_device;
+extern struct platform_device *smu_get_ofdev(void);
+
+
+/*
+ * Common command wrappers
+ */
+extern void smu_shutdown(void);
+extern void smu_restart(void);
+struct rtc_time;
+extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
+extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
+
+/*
+ * Kernel asynchronous i2c interface
+ */
+
+#define SMU_I2C_READ_MAX	0x1d
+#define SMU_I2C_WRITE_MAX	0x15
+
+/* SMU i2c header, exactly matches i2c header on wire */
+struct smu_i2c_param
+{
+	u8	bus;		/* SMU bus ID (from device tree) */
+	u8	type;		/* i2c transfer type */
+	u8	devaddr;	/* device address (includes direction) */
+	u8	sublen;		/* subaddress length */
+	u8	subaddr[3];	/* subaddress */
+	u8	caddr;		/* combined address, filled by SMU driver */
+	u8	datalen;	/* length of transfer */
+	u8	data[SMU_I2C_READ_MAX];	/* data */
+};
+
+struct smu_i2c_cmd
+{
+	/* public */
+	struct smu_i2c_param	info;
+	void			(*done)(struct smu_i2c_cmd *cmd, void *misc);
+	void			*misc;
+	int			status; /* 1 = pending, 0 = ok, <0 = fail */
+
+	/* private */
+	struct smu_cmd		scmd;
+	int			read;
+	int			stage;
+	int			retries;
+	u8			pdata[32];
+	struct list_head	link;
+};
+
+/*
+ * Call this to queue an i2c command to the SMU. You must fill in info
+ * (including info.data for a write), done and misc.
+ * For now, no polling interface is provided, so you have to use the
+ * completion callback.
+ */
+extern int smu_queue_i2c(struct smu_i2c_cmd *cmd);
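+
+/*
+ * A minimal sketch of the above (bus and addresses are hypothetical;
+ * error handling elided):
+ *
+ *	static struct smu_i2c_cmd cmd;
+ *
+ *	cmd.info.bus	    = 0xa;
+ *	cmd.info.type	    = SMU_I2C_TRANSFER_COMBINED;
+ *	cmd.info.devaddr    = 0x92;
+ *	cmd.info.sublen	    = 1;
+ *	cmd.info.subaddr[0] = 0;
+ *	cmd.info.datalen    = 2;
+ *	cmd.done	    = my_done_callback;
+ *	cmd.misc	    = NULL;
+ *	rc = smu_queue_i2c(&cmd);
+ */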
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - SMU "sdb" partition information -
+ */
+
+
+/*
+ * Partition header format
+ */
+struct smu_sdbp_header {
+	__u8	id;
+	__u8	len;
+	__u8	version;
+	__u8	flags;
+};
+
+
+/*
+ * demangle 16 and 32 bit integers in some SMU partitions
+ * (currently, afaik, this concerns only the FVT partition
+ * (0x12))
+ */
+#define SMU_U16_MIX(x)	le16_to_cpu(x)
+#define SMU_U32_MIX(x)  ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
+
+
+/* This is the definition of the SMU sdb-partition-0x12 table (called
+ * CPU F/V/T operating points in Darwin). The definition for all those
+ * SMU tables should be moved to some separate file
+ */
+#define SMU_SDB_FVT_ID			0x12
+
+struct smu_sdbp_fvt {
+	__u32	sysclk;			/* Base SysClk frequency in Hz for
+					 * this operating point. Value needs to
+					 * be unmixed with SMU_U32_MIX()
+					 */
+	__u8	pad;
+	__u8	maxtemp;		/* Max temp. supported by this
+					 * operating point
+					 */
+
+	__u16	volts[3];		/* CPU core voltage for the 3
+					 * PowerTune modes, a mode with
+					 * 0V = not supported. Values need
+					 * to be unmixed with SMU_U16_MIX()
+					 */
+};
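+
+/* E.g. (a sketch): hz = SMU_U32_MIX(fvt->sysclk) and
+ * v = SMU_U16_MIX(fvt->volts[mode]) yield the unmixed values.
+ */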
+
+/* This partition contains voltage & current sensor calibration
+ * information
+ */
+#define SMU_SDB_CPUVCP_ID		0x21
+
+struct smu_sdbp_cpuvcp {
+	__u16	volt_scale;		/* u4.12 fixed point */
+	__s16	volt_offset;		/* s4.12 fixed point */
+	__u16	curr_scale;		/* u4.12 fixed point */
+	__s16	curr_offset;		/* s4.12 fixed point */
+	__s32	power_quads[3];		/* s4.28 fixed point */
+};
+
+/* This partition contains CPU thermal diode calibration
+ */
+#define SMU_SDB_CPUDIODE_ID		0x18
+
+struct smu_sdbp_cpudiode {
+	__u16	m_value;		/* u1.15 fixed point */
+	__s16	b_value;		/* s10.6 fixed point */
+
+};
+
+/* This partition contains Slots power calibration
+ */
+#define SMU_SDB_SLOTSPOW_ID		0x78
+
+struct smu_sdbp_slotspow {
+	__u16	pow_scale;		/* u4.12 fixed point */
+	__s16	pow_offset;		/* s4.12 fixed point */
+};
+
+/* This partition contains machine-specific version information about
+ * the sensor/control layout
+ */
+#define SMU_SDB_SENSORTREE_ID		0x25
+
+struct smu_sdbp_sensortree {
+	__u8	model_id;
+	__u8	unknown[3];
+};
+
+/* This partition contains CPU thermal control PID information. So far
+ * only single-CPU machines have been seen with an SMU, so we assume this
+ * carries only information for those
+ */
+#define SMU_SDB_CPUPIDDATA_ID		0x17
+
+struct smu_sdbp_cpupiddata {
+	__u8	unknown1;
+	__u8	target_temp_delta;
+	__u8	unknown2;
+	__u8	history_len;
+	__s16	power_adj;
+	__u16	max_power;
+	__s32	gp,gr,gd;
+};
+
+
+/* Other partitions without known structures */
+#define SMU_SDB_DEBUG_SWITCHES_ID	0x05
+
+#ifdef __KERNEL__
+/*
+ * This returns a pointer to an SMU "sdb" partition's data, or NULL
+ * if not found. The data formats are described above.
+ */
+extern const struct smu_sdbp_header *smu_get_sdb_partition(int id,
+					unsigned int *size);
+
+/* Get "sdb" partition data from an SMU satellite */
+extern struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id,
+					int id, unsigned int *size);
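+
+/*
+ * Usage sketch for the FVT partition (a minimal illustration; it assumes
+ * the partition data immediately follows the returned header, which is
+ * how the driver lays the buffer out):
+ *
+ *	unsigned int size;
+ *	const struct smu_sdbp_header *hdr;
+ *	const struct smu_sdbp_fvt *fvt;
+ *
+ *	hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, &size);
+ *	if (hdr == NULL)
+ *		return -ENODEV;
+ *	fvt = (const struct smu_sdbp_fvt *)&hdr[1];
+ *	freq  = SMU_U32_MIX(fvt->sysclk);	// unmix before use
+ *	vcore = SMU_U16_MIX(fvt->volts[0]);
+ */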
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - Userland interface -
+ */
+
+/*
+ * A given instance of the device can be configured for 2 different
+ * things at the moment:
+ *
+ *  - sending SMU commands (default at open() time)
+ *  - receiving SMU events (not yet implemented)
+ *
+ * Commands are written with write() of a command block. They can be
+ * "driver" commands (for example to switch to event reception mode)
+ * or real SMU commands. They are made of a header followed by command
+ * data if any.
+ *
+ * For SMU commands (not for driver commands), you can then read() back
+ * a reply. The reader will be blocked or not depending on how the device
+ * file is opened. poll() isn't implemented yet. The reply will consist
+ * of a header as well, followed by the reply data if any. You should
+ * always provide a buffer large enough for the maximum reply data; I
+ * recommend one page.
+ *
+ * It is illegal to send SMU commands through a file descriptor configured
+ * for event reception.
+ *
+ */
+struct smu_user_cmd_hdr
+{
+	__u32		cmdtype;
+#define SMU_CMDTYPE_SMU			0	/* SMU command */
+#define SMU_CMDTYPE_WANTS_EVENTS	1	/* switch fd to events mode */
+#define SMU_CMDTYPE_GET_PARTITION	2	/* retrieve an sdb partition */
+
+	__u8		cmd;			/* SMU command byte */
+	__u8		pad[3];			/* padding */
+	__u32		data_len;		/* Length of data following */
+};
+
+struct smu_user_reply_hdr
+{
+	__u32		status;			/* Command status */
+	__u32		reply_len;		/* Length of data following */
+};
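+
+/*
+ * User-space usage sketch. The device node name (/dev/smu) and the
+ * command byte are assumptions for illustration; the command byte must
+ * come from the SMU command set, which is not defined in this header:
+ *
+ *	struct smu_user_cmd_hdr hdr = {
+ *		.cmdtype  = SMU_CMDTYPE_SMU,
+ *		.cmd	  = 0x8e,	// hypothetical SMU command byte
+ *		.data_len = 0,
+ *	};
+ *	char buf[4096];			// one page, as recommended above
+ *	int fd = open("/dev/smu", O_RDWR);
+ *
+ *	write(fd, &hdr, sizeof(hdr));	// header (+ command data, if any)
+ *	read(fd, buf, sizeof(buf));	// smu_user_reply_hdr + reply data
+ */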
+
+#endif /*  _SMU_H */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
new file mode 100644
index 0000000..28f5dae
--- /dev/null
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS		2^N: how big each section will be
+ * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS       24
+/*
+ * If we store section details in page->flags, we can't increase
+ * MAX_PHYSMEM_BITS. If we increase SECTIONS_WIDTH, we will not store node
+ * details in page->flags and page_to_nid does a page->section->node lookup
+ * instead. Hence only increase MAX_PHYSMEM_BITS for VMEMMAP.
+ */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define MAX_PHYSMEM_BITS        47
+#else
+#define MAX_PHYSMEM_BITS        46
+#endif
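+
+/*
+ * Worked example (a sketch; the generic sparsemem code in linux/mmzone.h
+ * derives this): each section spans 2^24 bytes = 16MB, so with
+ * MAX_PHYSMEM_BITS = 47 (128TB) there are 2^(47 - 24) = 2^23 sections:
+ *
+ *	NR_MEM_SECTIONS = 1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
+ */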
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int create_section_mapping(unsigned long start, unsigned long end, int nid);
+extern int remove_section_mapping(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+extern void resize_hpt_for_hotplug(unsigned long new_mem_size);
+#else
+static inline void resize_hpt_for_hotplug(unsigned long new_mem_size) { }
+#endif
+
+#ifdef CONFIG_NUMA
+extern int hot_add_scn_to_nid(unsigned long scn_addr);
+#else
+static inline int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
new file mode 100644
index 0000000..685c723
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -0,0 +1,313 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+#ifdef __KERNEL__
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ *	Rework to support virtual processors
+ *
+ * An int-sized lock word is used, as a full 64-bit word is not necessary.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+#include <linux/irqflags.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#endif
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/asm-405.h>
+
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
+#define LOCK_TOKEN	1
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO		do {						\
+				if (unlikely(get_paca()->io_sync)) {	\
+					mb();				\
+					get_paca()->io_sync = 0;	\
+				}					\
+			} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+#define vcpu_is_preempted vcpu_is_preempted
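+/* On shared-processor LPARs the lppaca yield_count is odd while preempted */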
+static inline bool vcpu_is_preempted(int cpu)
+{
+	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+		return false;
+	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+}
+#endif
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	smp_mb();
+	return !arch_spin_value_unlocked(*lock);
+}
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned long tmp, token;
+
+	token = LOCK_TOKEN;
+	__asm__ __volatile__(
+"1:	" PPC_LWARX(%0,0,%2,1) "\n\
+	cmpwi		0,%0,0\n\
+	bne-		2f\n\
+	stwcx.		%1,0,%2\n\
+	bne-		1b\n"
+	PPC_ACQUIRE_BARRIER
+"2:"
+	: "=&r" (tmp)
+	: "r" (token), "r" (&lock->slock)
+	: "cr0", "memory");
+
+	return tmp;
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	return __arch_spin_trylock(lock) == 0;
+}
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor.  Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held.  Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+#if defined(CONFIG_PPC_SPLPAR)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x)	barrier()
+#define __rw_yield(x)	barrier()
+#define SHARED_PROCESSOR	0
+#endif
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	while (1) {
+		if (likely(__arch_spin_trylock(lock) == 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__spin_yield(lock);
+		} while (unlikely(lock->slock != 0));
+		HMT_medium();
+	}
+}
+
+static inline
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+{
+	unsigned long flags_dis;
+
+	CLEAR_IO_SYNC;
+	while (1) {
+		if (likely(__arch_spin_trylock(lock) == 0))
+			break;
+		local_save_flags(flags_dis);
+		local_irq_restore(flags);
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__spin_yield(lock);
+		} while (unlikely(lock->slock != 0));
+		HMT_medium();
+		local_irq_restore(flags_dis);
+	}
+}
+#define arch_spin_lock_flags arch_spin_lock_flags
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	SYNC_IO;
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
+				PPC_RELEASE_BARRIER: : :"memory");
+	lock->slock = 0;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! It is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
+#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN		(-1)
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+"1:	" PPC_LWARX(%0,0,%1,1) "\n"
+	__DO_SIGN_EXTEND
+"	addic.		%0,%0,1\n\
+	ble-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
+	bne-		1b\n"
+	PPC_ACQUIRE_BARRIER
+"2:"	: "=&r" (tmp)
+	: "r" (&rw->lock)
+	: "cr0", "xer", "memory");
+
+	return tmp;
+}
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
+{
+	long tmp, token;
+
+	token = WRLOCK_TOKEN;
+	__asm__ __volatile__(
+"1:	" PPC_LWARX(%0,0,%2,1) "\n\
+	cmpwi		0,%0,0\n\
+	bne-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%1,0,%2\n\
+	bne-		1b\n"
+	PPC_ACQUIRE_BARRIER
+"2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&rw->lock)
+	: "cr0", "memory");
+
+	return tmp;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (1) {
+		if (likely(__arch_read_trylock(rw) > 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__rw_yield(rw);
+		} while (unlikely(rw->lock < 0));
+		HMT_medium();
+	}
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (1) {
+		if (likely(__arch_write_trylock(rw) == 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__rw_yield(rw);
+		} while (unlikely(rw->lock != 0));
+		HMT_medium();
+	}
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	return __arch_read_trylock(rw) > 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	return __arch_write_trylock(rw) == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+	"# read_unlock\n\t"
+	PPC_RELEASE_BARRIER
+"1:	lwarx		%0,0,%1\n\
+	addic		%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
+	bne-		1b"
+	: "=&r"(tmp)
+	: "r"(&rw->lock)
+	: "cr0", "xer", "memory");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	__asm__ __volatile__("# write_unlock\n\t"
+				PPC_RELEASE_BARRIER: : :"memory");
+	rw->lock = 0;
+}
+
+#define arch_spin_relax(lock)	__spin_yield(lock)
+#define arch_read_relax(lock)	__rw_yield(lock)
+#define arch_write_relax(lock)	__rw_yield(lock)
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()   smp_mb()
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..87adaf1
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile signed int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
new file mode 100644
index 0000000..a6e6e2b
--- /dev/null
+++ b/arch/powerpc/include/asm/spu.h
@@ -0,0 +1,725 @@
+/*
+ * SPU core / file system interface and HW structures
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+#ifdef __KERNEL__
+
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/reg.h>
+#include <asm/copro.h>
+
+#define LS_SIZE (256 * 1024)
+#define LS_ADDR_MASK (LS_SIZE - 1)
+
+#define MFC_PUT_CMD             0x20
+#define MFC_PUTS_CMD            0x28
+#define MFC_PUTR_CMD            0x30
+#define MFC_PUTF_CMD            0x22
+#define MFC_PUTB_CMD            0x21
+#define MFC_PUTFS_CMD           0x2A
+#define MFC_PUTBS_CMD           0x29
+#define MFC_PUTRF_CMD           0x32
+#define MFC_PUTRB_CMD           0x31
+#define MFC_PUTL_CMD            0x24
+#define MFC_PUTRL_CMD           0x34
+#define MFC_PUTLF_CMD           0x26
+#define MFC_PUTLB_CMD           0x25
+#define MFC_PUTRLF_CMD          0x36
+#define MFC_PUTRLB_CMD          0x35
+
+#define MFC_GET_CMD             0x40
+#define MFC_GETS_CMD            0x48
+#define MFC_GETF_CMD            0x42
+#define MFC_GETB_CMD            0x41
+#define MFC_GETFS_CMD           0x4A
+#define MFC_GETBS_CMD           0x49
+#define MFC_GETL_CMD            0x44
+#define MFC_GETLF_CMD           0x46
+#define MFC_GETLB_CMD           0x45
+
+#define MFC_SDCRT_CMD           0x80
+#define MFC_SDCRTST_CMD         0x81
+#define MFC_SDCRZ_CMD           0x89
+#define MFC_SDCRS_CMD           0x8D
+#define MFC_SDCRF_CMD           0x8F
+
+#define MFC_GETLLAR_CMD         0xD0
+#define MFC_PUTLLC_CMD          0xB4
+#define MFC_PUTLLUC_CMD         0xB0
+#define MFC_PUTQLLUC_CMD        0xB8
+#define MFC_SNDSIG_CMD          0xA0
+#define MFC_SNDSIGB_CMD         0xA1
+#define MFC_SNDSIGF_CMD         0xA2
+#define MFC_BARRIER_CMD         0xC0
+#define MFC_EIEIO_CMD           0xC8
+#define MFC_SYNC_CMD            0xCC
+
+#define MFC_MIN_DMA_SIZE_SHIFT  4       /* 16 bytes */
+#define MFC_MAX_DMA_SIZE_SHIFT  14      /* 16384 bytes */
+#define MFC_MIN_DMA_SIZE        (1 << MFC_MIN_DMA_SIZE_SHIFT)
+#define MFC_MAX_DMA_SIZE        (1 << MFC_MAX_DMA_SIZE_SHIFT)
+#define MFC_MIN_DMA_SIZE_MASK   (MFC_MIN_DMA_SIZE - 1)
+#define MFC_MAX_DMA_SIZE_MASK   (MFC_MAX_DMA_SIZE - 1)
+#define MFC_MIN_DMA_LIST_SIZE   0x0008  /*   8 bytes */
+#define MFC_MAX_DMA_LIST_SIZE   0x4000  /* 16K bytes */
+
+#define MFC_TAGID_TO_TAGMASK(tag_id)  (1 << (tag_id & 0x1F))
+
+/* Events for Channels 0-2 */
+#define MFC_DMA_TAG_STATUS_UPDATE_EVENT     0x00000001
+#define MFC_DMA_TAG_CMD_STALL_NOTIFY_EVENT  0x00000002
+#define MFC_DMA_QUEUE_AVAILABLE_EVENT       0x00000008
+#define MFC_SPU_MAILBOX_WRITTEN_EVENT       0x00000010
+#define MFC_DECREMENTER_EVENT               0x00000020
+#define MFC_PU_INT_MAILBOX_AVAILABLE_EVENT  0x00000040
+#define MFC_PU_MAILBOX_AVAILABLE_EVENT      0x00000080
+#define MFC_SIGNAL_2_EVENT                  0x00000100
+#define MFC_SIGNAL_1_EVENT                  0x00000200
+#define MFC_LLR_LOST_EVENT                  0x00000400
+#define MFC_PRIV_ATTN_EVENT                 0x00000800
+#define MFC_MULTI_SRC_EVENT                 0x00001000
+
+/* Flags indicating progress during context switch. */
+#define SPU_CONTEXT_SWITCH_PENDING	0UL
+#define SPU_CONTEXT_FAULT_PENDING	1UL
+
+struct spu_context;
+struct spu_runqueue;
+struct spu_lscsa;
+struct device_node;
+
+enum spu_utilization_state {
+	SPU_UTIL_USER,
+	SPU_UTIL_SYSTEM,
+	SPU_UTIL_IOWAIT,
+	SPU_UTIL_IDLE_LOADED,
+	SPU_UTIL_MAX
+};
+
+struct spu {
+	const char *name;
+	unsigned long local_store_phys;
+	u8 *local_store;
+	unsigned long problem_phys;
+	struct spu_problem __iomem *problem;
+	struct spu_priv2 __iomem *priv2;
+	struct list_head cbe_list;
+	struct list_head full_list;
+	enum { SPU_FREE, SPU_USED } alloc_state;
+	int number;
+	unsigned int irqs[3];
+	u32 node;
+	unsigned long flags;
+	u64 class_0_pending;
+	u64 class_0_dar;
+	u64 class_1_dar;
+	u64 class_1_dsisr;
+	size_t ls_size;
+	unsigned int slb_replace;
+	struct mm_struct *mm;
+	struct spu_context *ctx;
+	struct spu_runqueue *rq;
+	unsigned long long timestamp;
+	pid_t pid;
+	pid_t tgid;
+	spinlock_t register_lock;
+
+	void (* wbox_callback)(struct spu *spu);
+	void (* ibox_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu, int irq);
+	void (* mfc_callback)(struct spu *spu);
+
+	char irq_c0[8];
+	char irq_c1[8];
+	char irq_c2[8];
+
+	u64 spe_id;
+
+	void* pdata; /* platform private data */
+
+	/* of based platforms only */
+	struct device_node *devnode;
+
+	/* native only */
+	struct spu_priv1 __iomem *priv1;
+
+	/* beat only */
+	u64 shadow_int_mask_RW[3];
+
+	struct device dev;
+
+	int has_mem_affinity;
+	struct list_head aff_list;
+
+	struct {
+		/* protected by interrupt reentrancy */
+		enum spu_utilization_state util_state;
+		unsigned long long tstamp;
+		unsigned long long times[SPU_UTIL_MAX];
+		unsigned long long vol_ctx_switch;
+		unsigned long long invol_ctx_switch;
+		unsigned long long min_flt;
+		unsigned long long maj_flt;
+		unsigned long long hash_flt;
+		unsigned long long slb_flt;
+		unsigned long long class2_intr;
+		unsigned long long libassist;
+	} stats;
+};
+
+struct cbe_spu_info {
+	struct mutex list_mutex;
+	struct list_head spus;
+	int n_spus;
+	int nr_active;
+	atomic_t busy_spus;
+	atomic_t reserved_spus;
+};
+
+extern struct cbe_spu_info cbe_spu_info[];
+
+void spu_init_channels(struct spu *spu);
+void spu_irq_setaffinity(struct spu *spu, int cpu);
+
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+		void *code, int code_size);
+
+extern void spu_invalidate_slbs(struct spu *spu);
+extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
+int spu_64k_pages_available(void);
+
+/* Calls from the memory management to the SPU */
+struct mm_struct;
+extern void spu_flush_all_slbs(struct mm_struct *mm);
+
+/* This interface allows a profiler (e.g., OProfile) to store a ref
+ * to spu context information that it creates.	This caching technique
+ * avoids the need to recreate this information after a save/restore operation.
+ *
+ * Assumes the caller has already incremented the ref count to
+ * profile_info; then spu_context_destroy must call kref_put
+ * on prof_info_kref.
+ */
+void spu_set_profile_private_kref(struct spu_context *ctx,
+				  struct kref *prof_info_kref,
+				  void ( * prof_info_release) (struct kref *kref));
+
+void *spu_get_profile_private_kref(struct spu_context *ctx);
+
+/* system callbacks from the SPU */
+struct spu_syscall_block {
+	u64 nr_ret;
+	u64 parm[6];
+};
+extern long spu_sys_callback(struct spu_syscall_block *s);
+
+/* syscalls implemented in spufs */
+struct file;
+struct coredump_params;
+struct spufs_calls {
+	long (*create_thread)(const char __user *name,
+					unsigned int flags, umode_t mode,
+					struct file *neighbor);
+	long (*spu_run)(struct file *filp, __u32 __user *unpc,
+						__u32 __user *ustatus);
+	int (*coredump_extra_notes_size)(void);
+	int (*coredump_extra_notes_write)(struct coredump_params *cprm);
+	void (*notify_spus_active)(void);
+	struct module *owner;
+};
+
+/* return status from spu_run, same as in libspe */
+#define SPE_EVENT_DMA_ALIGNMENT		0x0008	/* A DMA alignment error */
+#define SPE_EVENT_SPE_ERROR		0x0010	/* An illegal instruction error */
+#define SPE_EVENT_SPE_DATA_SEGMENT	0x0020	/* A DMA segmentation error */
+#define SPE_EVENT_SPE_DATA_STORAGE	0x0040	/* A DMA storage error */
+#define SPE_EVENT_INVALID_DMA		0x0800	/* Invalid MFC DMA */
+
+/*
+ * Flags for sys_spu_create.
+ */
+#define SPU_CREATE_EVENTS_ENABLED	0x0001
+#define SPU_CREATE_GANG			0x0002
+#define SPU_CREATE_NOSCHED		0x0004
+#define SPU_CREATE_ISOLATE		0x0008
+#define SPU_CREATE_AFFINITY_SPU		0x0010
+#define SPU_CREATE_AFFINITY_MEM		0x0020
+
+#define SPU_CREATE_FLAG_ALL		0x003f /* mask of all valid flags */
+
+
+int register_spu_syscalls(struct spufs_calls *calls);
+void unregister_spu_syscalls(struct spufs_calls *calls);
+
+int spu_add_dev_attr(struct device_attribute *attr);
+void spu_remove_dev_attr(struct device_attribute *attr);
+
+int spu_add_dev_attr_group(struct attribute_group *attrs);
+void spu_remove_dev_attr_group(struct attribute_group *attrs);
+
+/*
+ * Notifier blocks:
+ *
+ * oprofile can get notified when a context switch is performed
+ * on an spe. The notifier function that gets called is passed
+ * a pointer to the SPU structure as well as the object-id that
+ * identifies the binary running on that SPU now.
+ *
+ * For a context save, the object-id that is passed is zero,
+ * identifying that the kernel will run from that moment on.
+ *
+ * For a context restore, the object-id is the value written
+ * to the object-id spufs file from user space, and the notifier
+ * function can assume that spu->ctx is valid.
+ */
+struct notifier_block;
+int spu_switch_event_register(struct notifier_block * n);
+int spu_switch_event_unregister(struct notifier_block * n);
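+
+/*
+ * Registration sketch (the callback signature follows the generic
+ * notifier convention; treating "val" as the object-id and "data" as
+ * the struct spu is an assumption based on the description above):
+ *
+ *	static int my_switch_cb(struct notifier_block *n,
+ *				unsigned long object_id, void *data)
+ *	{
+ *		struct spu *spu = data;
+ *		// record object_id for the binary now running on spu
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_switch_cb };
+ *
+ *	spu_switch_event_register(&my_nb);
+ */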
+
+extern void notify_spus_active(void);
+extern void do_notify_spus_active(void);
+
+/*
+ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+ */
+
+union mfc_tag_size_class_cmd {
+	struct {
+		u16 mfc_size;
+		u16 mfc_tag;
+		u8  pad;
+		u8  mfc_rclassid;
+		u16 mfc_cmd;
+	} u;
+	struct {
+		u32 mfc_size_tag32;
+		u32 mfc_class_cmd32;
+	} by32;
+	u64 all64;
+};
+
+struct mfc_cq_sr {
+	u64 mfc_cq_data0_RW;
+	u64 mfc_cq_data1_RW;
+	u64 mfc_cq_data2_RW;
+	u64 mfc_cq_data3_RW;
+};
+
+struct spu_problem {
+#define MS_SYNC_PENDING         1L
+	u64 spc_mssync_RW;					/* 0x0000 */
+	u8  pad_0x0008_0x3000[0x3000 - 0x0008];
+
+	/* DMA Area */
+	u8  pad_0x3000_0x3004[0x4];				/* 0x3000 */
+	u32 mfc_lsa_W;						/* 0x3004 */
+	u64 mfc_ea_W;						/* 0x3008 */
+	union mfc_tag_size_class_cmd mfc_union_W;			/* 0x3010 */
+	u8  pad_0x3018_0x3104[0xec];				/* 0x3018 */
+	u32 dma_qstatus_R;					/* 0x3104 */
+	u8  pad_0x3108_0x3204[0xfc];				/* 0x3108 */
+	u32 dma_querytype_RW;					/* 0x3204 */
+	u8  pad_0x3208_0x321c[0x14];				/* 0x3208 */
+	u32 dma_querymask_RW;					/* 0x321c */
+	u8  pad_0x3220_0x322c[0xc];				/* 0x3220 */
+	u32 dma_tagstatus_R;					/* 0x322c */
+#define DMA_TAGSTATUS_INTR_ANY	1u
+#define DMA_TAGSTATUS_INTR_ALL	2u
+	u8  pad_0x3230_0x4000[0x4000 - 0x3230]; 		/* 0x3230 */
+
+	/* SPU Control Area */
+	u8  pad_0x4000_0x4004[0x4];				/* 0x4000 */
+	u32 pu_mb_R;						/* 0x4004 */
+	u8  pad_0x4008_0x400c[0x4];				/* 0x4008 */
+	u32 spu_mb_W;						/* 0x400c */
+	u8  pad_0x4010_0x4014[0x4];				/* 0x4010 */
+	u32 mb_stat_R;						/* 0x4014 */
+	u8  pad_0x4018_0x401c[0x4];				/* 0x4018 */
+	u32 spu_runcntl_RW;					/* 0x401c */
+#define SPU_RUNCNTL_STOP	0L
+#define SPU_RUNCNTL_RUNNABLE	1L
+#define SPU_RUNCNTL_ISOLATE	2L
+	u8  pad_0x4020_0x4024[0x4];				/* 0x4020 */
+	u32 spu_status_R;					/* 0x4024 */
+#define SPU_STOP_STATUS_SHIFT           16
+#define SPU_STATUS_STOPPED		0x0
+#define SPU_STATUS_RUNNING		0x1
+#define SPU_STATUS_STOPPED_BY_STOP	0x2
+#define SPU_STATUS_STOPPED_BY_HALT	0x4
+#define SPU_STATUS_WAITING_FOR_CHANNEL	0x8
+#define SPU_STATUS_SINGLE_STEP		0x10
+#define SPU_STATUS_INVALID_INSTR        0x20
+#define SPU_STATUS_INVALID_CH           0x40
+#define SPU_STATUS_ISOLATED_STATE       0x80
+#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
+	u8  pad_0x4028_0x402c[0x4];				/* 0x4028 */
+	u32 spu_spe_R;						/* 0x402c */
+	u8  pad_0x4030_0x4034[0x4];				/* 0x4030 */
+	u32 spu_npc_RW;						/* 0x4034 */
+	u8  pad_0x4038_0x14000[0x14000 - 0x4038];		/* 0x4038 */
+
+	/* Signal Notification Area */
+	u8  pad_0x14000_0x1400c[0xc];				/* 0x14000 */
+	u32 signal_notify1;					/* 0x1400c */
+	u8  pad_0x14010_0x1c00c[0x7ffc];			/* 0x14010 */
+	u32 signal_notify2;					/* 0x1c00c */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 2 State Area */
+struct spu_priv2 {
+	/* MFC Registers */
+	u8  pad_0x0000_0x1100[0x1100 - 0x0000]; 		/* 0x0000 */
+
+	/* SLB Management Registers */
+	u8  pad_0x1100_0x1108[0x8];				/* 0x1100 */
+	u64 slb_index_W;					/* 0x1108 */
+#define SLB_INDEX_MASK				0x7L
+	u64 slb_esid_RW;					/* 0x1110 */
+	u64 slb_vsid_RW;					/* 0x1118 */
+#define SLB_VSID_SUPERVISOR_STATE	(0x1ull << 11)
+#define SLB_VSID_SUPERVISOR_STATE_MASK	(0x1ull << 11)
+#define SLB_VSID_PROBLEM_STATE		(0x1ull << 10)
+#define SLB_VSID_PROBLEM_STATE_MASK	(0x1ull << 10)
+#define SLB_VSID_EXECUTE_SEGMENT	(0x1ull << 9)
+#define SLB_VSID_NO_EXECUTE_SEGMENT	(0x1ull << 9)
+#define SLB_VSID_EXECUTE_SEGMENT_MASK	(0x1ull << 9)
+#define SLB_VSID_4K_PAGE		(0x0 << 8)
+#define SLB_VSID_LARGE_PAGE		(0x1ull << 8)
+#define SLB_VSID_PAGE_SIZE_MASK		(0x1ull << 8)
+#define SLB_VSID_CLASS_MASK		(0x1ull << 7)
+#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK	(0x1ull << 6)
+	u64 slb_invalidate_entry_W;				/* 0x1120 */
+	u64 slb_invalidate_all_W;				/* 0x1128 */
+	u8  pad_0x1130_0x2000[0x2000 - 0x1130]; 		/* 0x1130 */
+
+	/* Context Save / Restore Area */
+	struct mfc_cq_sr spuq[16];				/* 0x2000 */
+	struct mfc_cq_sr puq[8];				/* 0x2200 */
+	u8  pad_0x2300_0x3000[0x3000 - 0x2300]; 		/* 0x2300 */
+
+	/* MFC Control */
+	u64 mfc_control_RW;					/* 0x3000 */
+#define MFC_CNTL_RESUME_DMA_QUEUE		(0ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE		(1ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK		(1ull << 0)
+#define MFC_CNTL_SUSPEND_MASK			(1ull << 4)
+#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION	(0ull << 8)
+#define MFC_CNTL_SUSPEND_IN_PROGRESS		(1ull << 8)
+#define MFC_CNTL_SUSPEND_COMPLETE		(3ull << 8)
+#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK	(3ull << 8)
+#define MFC_CNTL_DMA_QUEUES_EMPTY		(1ull << 14)
+#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK		(1ull << 14)
+#define MFC_CNTL_PURGE_DMA_REQUEST		(1ull << 15)
+#define MFC_CNTL_PURGE_DMA_IN_PROGRESS		(1ull << 24)
+#define MFC_CNTL_PURGE_DMA_COMPLETE		(3ull << 24)
+#define MFC_CNTL_PURGE_DMA_STATUS_MASK		(3ull << 24)
+#define MFC_CNTL_RESTART_DMA_COMMAND		(1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING	(1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
+#define MFC_CNTL_MFC_PRIVILEGE_STATE		(2ull << 33)
+#define MFC_CNTL_MFC_PROBLEM_STATE		(3ull << 33)
+#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK	(3ull << 33)
+#define MFC_CNTL_DECREMENTER_HALTED		(1ull << 35)
+#define MFC_CNTL_DECREMENTER_RUNNING		(1ull << 40)
+#define MFC_CNTL_DECREMENTER_STATUS_MASK	(1ull << 40)
+	u8  pad_0x3008_0x4000[0x4000 - 0x3008]; 		/* 0x3008 */
+
+	/* Interrupt Mailbox */
+	u64 puint_mb_R;						/* 0x4000 */
+	u8  pad_0x4008_0x4040[0x4040 - 0x4008]; 		/* 0x4008 */
+
+	/* SPU Control */
+	u64 spu_privcntl_RW;					/* 0x4040 */
+#define SPU_PRIVCNTL_MODE_NORMAL		(0x0ull << 0)
+#define SPU_PRIVCNTL_MODE_SINGLE_STEP		(0x1ull << 0)
+#define SPU_PRIVCNTL_MODE_MASK			(0x1ull << 0)
+#define SPU_PRIVCNTL_NO_ATTENTION_EVENT		(0x0ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT		(0x1ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK	(0x1ull << 1)
+#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL		(0x0ull << 2)
+#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK	(0x1ull << 2)
+	u8  pad_0x4048_0x4058[0x10];				/* 0x4048 */
+	u64 spu_lslr_RW;					/* 0x4058 */
+	u64 spu_chnlcntptr_RW;					/* 0x4060 */
+	u64 spu_chnlcnt_RW;					/* 0x4068 */
+	u64 spu_chnldata_RW;					/* 0x4070 */
+	u64 spu_cfg_RW;						/* 0x4078 */
+	u8  pad_0x4080_0x5000[0x5000 - 0x4080]; 		/* 0x4080 */
+
+	/* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
+	u64 spu_pm_trace_tag_status_RW;				/* 0x5000 */
+	u64 spu_tag_status_query_RW;				/* 0x5008 */
+#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
+#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
+	u64 spu_cmd_buf1_RW;					/* 0x5010 */
+#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
+#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
+	u64 spu_cmd_buf2_RW;					/* 0x5018 */
+#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
+#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
+#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
+	u64 spu_atomic_status_RW;				/* 0x5020 */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 1 State Area */
+struct spu_priv1 {
+	/* Control and Configuration Area */
+	u64 mfc_sr1_RW;						/* 0x000 */
+#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK	0x01ull
+#define MFC_STATE1_BUS_TLBIE_MASK		0x02ull
+#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK	0x04ull
+#define MFC_STATE1_PROBLEM_STATE_MASK		0x08ull
+#define MFC_STATE1_RELOCATE_MASK		0x10ull
+#define MFC_STATE1_MASTER_RUN_CONTROL_MASK	0x20ull
+#define MFC_STATE1_TABLE_SEARCH_MASK		0x40ull
+	u64 mfc_lpid_RW;					/* 0x008 */
+	u64 spu_idr_RW;						/* 0x010 */
+	u64 mfc_vr_RO;						/* 0x018 */
+#define MFC_VERSION_BITS		(0xffff << 16)
+#define MFC_REVISION_BITS		(0xffff)
+#define MFC_GET_VERSION_BITS(vr)	(((vr) & MFC_VERSION_BITS) >> 16)
+#define MFC_GET_REVISION_BITS(vr)	((vr) & MFC_REVISION_BITS)
+	u64 spu_vr_RO;						/* 0x020 */
+#define SPU_VERSION_BITS		(0xffff << 16)
+#define SPU_REVISION_BITS		(0xffff)
+#define SPU_GET_VERSION_BITS(vr)	(((vr) & SPU_VERSION_BITS) >> 16)
+#define SPU_GET_REVISION_BITS(vr)	((vr) & SPU_REVISION_BITS)
+	u8  pad_0x28_0x100[0x100 - 0x28];			/* 0x28 */
+
+	/* Interrupt Area */
+	u64 int_mask_RW[3];					/* 0x100 */
+#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR		0x1L
+#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR		0x2L
+#define CLASS0_ENABLE_SPU_ERROR_INTR			0x4L
+#define CLASS0_ENABLE_MFC_FIR_INTR			0x8L
+#define CLASS1_ENABLE_SEGMENT_FAULT_INTR		0x1L
+#define CLASS1_ENABLE_STORAGE_FAULT_INTR		0x2L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR	0x4L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR	0x8L
+#define CLASS2_ENABLE_MAILBOX_INTR			0x1L
+#define CLASS2_ENABLE_SPU_STOP_INTR			0x2L
+#define CLASS2_ENABLE_SPU_HALT_INTR			0x4L
+#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR	0x8L
+#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR		0x10L
+	u8  pad_0x118_0x140[0x28];				/* 0x118 */
+	u64 int_stat_RW[3];					/* 0x140 */
+#define CLASS0_DMA_ALIGNMENT_INTR			0x1L
+#define CLASS0_INVALID_DMA_COMMAND_INTR			0x2L
+#define CLASS0_SPU_ERROR_INTR				0x4L
+#define CLASS0_INTR_MASK				0x7L
+#define CLASS1_SEGMENT_FAULT_INTR			0x1L
+#define CLASS1_STORAGE_FAULT_INTR			0x2L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR		0x4L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR		0x8L
+#define CLASS1_INTR_MASK				0xfL
+#define CLASS2_MAILBOX_INTR				0x1L
+#define CLASS2_SPU_STOP_INTR				0x2L
+#define CLASS2_SPU_HALT_INTR				0x4L
+#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR		0x8L
+#define CLASS2_MAILBOX_THRESHOLD_INTR			0x10L
+#define CLASS2_INTR_MASK				0x1fL
+	u8  pad_0x158_0x180[0x28];				/* 0x158 */
+	u64 int_route_RW;					/* 0x180 */
+
+	/* Interrupt Routing */
+	u8  pad_0x188_0x200[0x200 - 0x188];			/* 0x188 */
+
+	/* Atomic Unit Control Area */
+	u64 mfc_atomic_flush_RW;				/* 0x200 */
+#define mfc_atomic_flush_enable			0x1L
+	u8  pad_0x208_0x280[0x78];				/* 0x208 */
+	u64 resource_allocation_groupID_RW;			/* 0x280 */
+	u64 resource_allocation_enable_RW; 			/* 0x288 */
+	u8  pad_0x290_0x3c8[0x3c8 - 0x290];			/* 0x290 */
+
+	/* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
+
+	u64 smf_sbi_signal_sel;					/* 0x3c8 */
+#define smf_sbi_mask_lsb	56
+#define smf_sbi_shift		(63 - smf_sbi_mask_lsb)
+#define smf_sbi_mask		(0x301LL << smf_sbi_shift)
+#define smf_sbi_bus0_bits	(0x001LL << smf_sbi_shift)
+#define smf_sbi_bus2_bits	(0x100LL << smf_sbi_shift)
+#define smf_sbi2_bus0_bits	(0x201LL << smf_sbi_shift)
+#define smf_sbi2_bus2_bits	(0x300LL << smf_sbi_shift)
+	u64 smf_ato_signal_sel;					/* 0x3d0 */
+#define smf_ato_mask_lsb	35
+#define smf_ato_shift		(63 - smf_ato_mask_lsb)
+#define smf_ato_mask		(0x3LL << smf_ato_shift)
+#define smf_ato_bus0_bits	(0x2LL << smf_ato_shift)
+#define smf_ato_bus2_bits	(0x1LL << smf_ato_shift)
+	u8  pad_0x3d8_0x400[0x400 - 0x3d8];			/* 0x3d8 */
+
+	/* TLB Management Registers */
+	u64 mfc_sdr_RW;						/* 0x400 */
+	u8  pad_0x408_0x500[0xf8];				/* 0x408 */
+	u64 tlb_index_hint_RO;					/* 0x500 */
+	u64 tlb_index_W;					/* 0x508 */
+	u64 tlb_vpn_RW;						/* 0x510 */
+	u64 tlb_rpn_RW;						/* 0x518 */
+	u8  pad_0x520_0x540[0x20];				/* 0x520 */
+	u64 tlb_invalidate_entry_W;				/* 0x540 */
+	u64 tlb_invalidate_all_W;				/* 0x548 */
+	u8  pad_0x550_0x580[0x580 - 0x550];			/* 0x550 */
+
+	/* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
+	u64 smm_hid;						/* 0x580 */
+#define PAGE_SIZE_MASK		0xf000000000000000ull
+#define PAGE_SIZE_16MB_64KB	0x2000000000000000ull
+	u8  pad_0x588_0x600[0x600 - 0x588];			/* 0x588 */
+
+	/* MFC Status/Control Area */
+	u64 mfc_accr_RW;					/* 0x600 */
+#define MFC_ACCR_EA_ACCESS_GET		(1 << 0)
+#define MFC_ACCR_EA_ACCESS_PUT		(1 << 1)
+#define MFC_ACCR_LS_ACCESS_GET		(1 << 3)
+#define MFC_ACCR_LS_ACCESS_PUT		(1 << 4)
+	u8  pad_0x608_0x610[0x8];				/* 0x608 */
+	u64 mfc_dsisr_RW;					/* 0x610 */
+#define MFC_DSISR_PTE_NOT_FOUND		(1 << 30)
+#define MFC_DSISR_ACCESS_DENIED		(1 << 27)
+#define MFC_DSISR_ATOMIC		(1 << 26)
+#define MFC_DSISR_ACCESS_PUT		(1 << 25)
+#define MFC_DSISR_ADDR_MATCH		(1 << 22)
+#define MFC_DSISR_LS			(1 << 17)
+#define MFC_DSISR_L			(1 << 16)
+#define MFC_DSISR_ADDRESS_OVERFLOW	(1 << 0)
+	u8  pad_0x618_0x620[0x8];				/* 0x618 */
+	u64 mfc_dar_RW;						/* 0x620 */
+	u8  pad_0x628_0x700[0x700 - 0x628];			/* 0x628 */
+
+	/* Replacement Management Table (RMT) Area */
+	u64 rmt_index_RW;					/* 0x700 */
+	u8  pad_0x708_0x710[0x8];				/* 0x708 */
+	u64 rmt_data1_RW;					/* 0x710 */
+	u8  pad_0x718_0x800[0x800 - 0x718];			/* 0x718 */
+
+	/* Control/Configuration Registers */
+	u64 mfc_dsir_R;						/* 0x800 */
+#define MFC_DSIR_Q			(1 << 31)
+#define MFC_DSIR_SPU_QUEUE		MFC_DSIR_Q
+	u64 mfc_lsacr_RW;					/* 0x808 */
+#define MFC_LSACR_COMPARE_MASK		((~0ull) << 32)
+#define MFC_LSACR_COMPARE_ADDR		((~0ull) >> 32)
+	u64 mfc_lscrr_R;					/* 0x810 */
+#define MFC_LSCRR_Q			(1 << 31)
+#define MFC_LSCRR_SPU_QUEUE		MFC_LSCRR_Q
+#define MFC_LSCRR_QI_SHIFT		32
+#define MFC_LSCRR_QI_MASK		((~0ull) << MFC_LSCRR_QI_SHIFT)
+	u8  pad_0x818_0x820[0x8];				/* 0x818 */
+	u64 mfc_tclass_id_RW;					/* 0x820 */
+#define MFC_TCLASS_ID_ENABLE		(1L << 0L)
+#define MFC_TCLASS_SLOT2_ENABLE		(1L << 5L)
+#define MFC_TCLASS_SLOT1_ENABLE		(1L << 6L)
+#define MFC_TCLASS_SLOT0_ENABLE		(1L << 7L)
+#define MFC_TCLASS_QUOTA_2_SHIFT	8L
+#define MFC_TCLASS_QUOTA_1_SHIFT	16L
+#define MFC_TCLASS_QUOTA_0_SHIFT	24L
+#define MFC_TCLASS_QUOTA_2_MASK		(0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
+#define MFC_TCLASS_QUOTA_1_MASK		(0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
+#define MFC_TCLASS_QUOTA_0_MASK		(0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
+	u8  pad_0x828_0x900[0x900 - 0x828];			/* 0x828 */
+
+	/* Real Mode Support Registers */
+	u64 mfc_rm_boundary;					/* 0x900 */
+	u8  pad_0x908_0x938[0x30];				/* 0x908 */
+	u64 smf_dma_signal_sel;					/* 0x938 */
+#define mfc_dma1_mask_lsb	41
+#define mfc_dma1_shift		(63 - mfc_dma1_mask_lsb)
+#define mfc_dma1_mask		(0x3LL << mfc_dma1_shift)
+#define mfc_dma1_bits		(0x1LL << mfc_dma1_shift)
+#define mfc_dma2_mask_lsb	43
+#define mfc_dma2_shift		(63 - mfc_dma2_mask_lsb)
+#define mfc_dma2_mask		(0x3LL << mfc_dma2_shift)
+#define mfc_dma2_bits		(0x1LL << mfc_dma2_shift)
+	u8  pad_0x940_0xa38[0xf8];				/* 0x940 */
+	u64 smm_signal_sel;					/* 0xa38 */
+#define smm_sig_mask_lsb	12
+#define smm_sig_shift		(63 - smm_sig_mask_lsb)
+#define smm_sig_mask		(0x3LL << smm_sig_shift)
+#define smm_sig_bus0_bits	(0x2LL << smm_sig_shift)
+#define smm_sig_bus2_bits	(0x1LL << smm_sig_shift)
+	u8  pad_0xa40_0xc00[0xc00 - 0xa40];			/* 0xa40 */
+
+	/* DMA Command Error Area */
+	u64 mfc_cer_R;						/* 0xc00 */
+#define MFC_CER_Q		(1 << 31)
+#define MFC_CER_SPU_QUEUE	MFC_CER_Q
+	u8  pad_0xc08_0x1000[0x1000 - 0xc08];			/* 0xc08 */
+
+	/* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
+	/* DMA Command Error Area */
+	u64 spu_ecc_cntl_RW;					/* 0x1000 */
+#define SPU_ECC_CNTL_E			(1ull << 0ull)
+#define SPU_ECC_CNTL_ENABLE		SPU_ECC_CNTL_E
+#define SPU_ECC_CNTL_DISABLE		(~SPU_ECC_CNTL_E & 1L)
+#define SPU_ECC_CNTL_S			(1ull << 1ull)
+#define SPU_ECC_STOP_AFTER_ERROR	SPU_ECC_CNTL_S
+#define SPU_ECC_CONTINUE_AFTER_ERROR	(~SPU_ECC_CNTL_S & 2L)
+#define SPU_ECC_CNTL_B			(1ull << 2ull)
+#define SPU_ECC_BACKGROUND_ENABLE	SPU_ECC_CNTL_B
+#define SPU_ECC_BACKGROUND_DISABLE	(~SPU_ECC_CNTL_B & 4L)
+#define SPU_ECC_CNTL_I_SHIFT		3ull
+#define SPU_ECC_CNTL_I_MASK		(3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_ALWAYS		(~SPU_ECC_CNTL_I & 12L)
+#define SPU_ECC_WRITE_CORRECTABLE	(1ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_UNCORRECTABLE	(3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_CNTL_D			(1ull << 5ull)
+#define SPU_ECC_DETECTION_ENABLE	SPU_ECC_CNTL_D
+#define SPU_ECC_DETECTION_DISABLE	(~SPU_ECC_CNTL_D & 32L)
+	u64 spu_ecc_stat_RW;					/* 0x1008 */
+#define SPU_ECC_CORRECTED_ERROR		(1ull << 0ul)
+#define SPU_ECC_UNCORRECTED_ERROR	(1ull << 1ul)
+#define SPU_ECC_SCRUB_COMPLETE		(1ull << 2ul)
+#define SPU_ECC_SCRUB_IN_PROGRESS	(1ull << 3ul)
+#define SPU_ECC_INSTRUCTION_ERROR	(1ull << 4ul)
+#define SPU_ECC_DATA_ERROR		(1ull << 5ul)
+#define SPU_ECC_DMA_ERROR		(1ull << 6ul)
+#define SPU_ECC_STATUS_CNT_MASK		(256ull << 8)
+	u64 spu_ecc_addr_RW;					/* 0x1010 */
+	u64 spu_err_mask_RW;					/* 0x1018 */
+#define SPU_ERR_ILLEGAL_INSTR		(1ull << 0ul)
+#define SPU_ERR_ILLEGAL_CHANNEL		(1ull << 1ul)
+	u8  pad_0x1020_0x1028[0x1028 - 0x1020];			/* 0x1020 */
+
+	/* SPU Debug-Trace Bus (DTB) Selection Registers */
+	u64 spu_trig0_sel;					/* 0x1028 */
+	u64 spu_trig1_sel;					/* 0x1030 */
+	u64 spu_trig2_sel;					/* 0x1038 */
+	u64 spu_trig3_sel;					/* 0x1040 */
+	u64 spu_trace_sel;					/* 0x1048 */
+#define spu_trace_sel_mask		0x1f1fLL
+#define spu_trace_sel_bus0_bits		0x1000LL
+#define spu_trace_sel_bus2_bits		0x0010LL
+	u64 spu_event0_sel;					/* 0x1050 */
+	u64 spu_event1_sel;					/* 0x1058 */
+	u64 spu_event2_sel;					/* 0x1060 */
+	u64 spu_event3_sel;					/* 0x1068 */
+	u64 spu_trace_cntl;					/* 0x1070 */
+} __attribute__ ((aligned(0x2000)));
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
new file mode 100644
index 0000000..51f80b4
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -0,0 +1,260 @@
+/*
+ * spu_csa.h: Definitions for SPU context save area (CSA).
+ *
+ * (C) Copyright IBM 2005
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_CSA_H_
+#define _SPU_CSA_H_
+#ifdef __KERNEL__
+
+/*
+ * Total number of 128-bit registers.
+ */
+#define NR_SPU_GPRS         	128
+#define NR_SPU_SPRS         	9
+#define NR_SPU_REGS_PAD	    	7
+#define NR_SPU_SPILL_REGS   	144	/* GPRS + SPRS + PAD */
+#define SIZEOF_SPU_SPILL_REGS	(NR_SPU_SPILL_REGS * 16)
+
+#define SPU_SAVE_COMPLETE      	0x3FFB
+#define SPU_RESTORE_COMPLETE   	0x3FFC
+
+/*
+ * Definitions for various 'stopped' status conditions,
+ * to be recreated during context restore.
+ */
+#define SPU_STOPPED_STATUS_P    1
+#define SPU_STOPPED_STATUS_I    2
+#define SPU_STOPPED_STATUS_H    3
+#define SPU_STOPPED_STATUS_S    4
+#define SPU_STOPPED_STATUS_S_I  5
+#define SPU_STOPPED_STATUS_S_P  6
+#define SPU_STOPPED_STATUS_P_H  7
+#define SPU_STOPPED_STATUS_P_I  8
+#define SPU_STOPPED_STATUS_R    9
+
+/*
+ * Definitions for software decrementer status flag.
+ */
+#define SPU_DECR_STATUS_RUNNING 0x1
+#define SPU_DECR_STATUS_WRAPPED 0x2
+
+#ifndef  __ASSEMBLY__
+/**
+ * spu_reg128 - generic 128-bit register definition.
+ */
+struct spu_reg128 {
+	u32 slot[4];
+};
+
+/**
+ * struct spu_lscsa - Local Store Context Save Area.
+ * @gprs: Array of saved registers.
+ * @fpcr: Saved floating point status control register.
+ * @decr: Saved decrementer value.
+ * @decr_status: Indicates software decrementer status flags.
+ * @ppu_mb: Saved PPU mailbox data.
+ * @ppuint_mb: Saved PPU interrupting mailbox data.
+ * @tag_mask: Saved tag group mask.
+ * @event_mask: Saved event mask.
+ * @srr0: Saved SRR0.
+ * @stopped_status: Conditions to be recreated by restore.
+ * @ls: Saved contents of Local Storage Area.
+ *
+ * The LSCSA represents state that is primarily saved and
+ * restored by SPU-side code.
+ */
+struct spu_lscsa {
+	struct spu_reg128 gprs[128];
+	struct spu_reg128 fpcr;
+	struct spu_reg128 decr;
+	struct spu_reg128 decr_status;
+	struct spu_reg128 ppu_mb;
+	struct spu_reg128 ppuint_mb;
+	struct spu_reg128 tag_mask;
+	struct spu_reg128 event_mask;
+	struct spu_reg128 srr0;
+	struct spu_reg128 stopped_status;
+
+	/*
+	 * 'ls' must be page-aligned on all configurations.
+	 * Since we don't want to rely on having the spu-gcc
+	 * installed to build the kernel and this structure
+	 * is used in the SPU-side code, make it 64k-page
+	 * aligned for now.
+	 */
+	unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
+};
+
+#ifndef __SPU__
+/*
+ * struct spu_problem_collapsed - condensed problem state area, w/o pads.
+ */
+struct spu_problem_collapsed {
+	u64 spc_mssync_RW;
+	u32 mfc_lsa_W;
+	u32 unused_pad0;
+	u64 mfc_ea_W;
+	union mfc_tag_size_class_cmd mfc_union_W;
+	u32 dma_qstatus_R;
+	u32 dma_querytype_RW;
+	u32 dma_querymask_RW;
+	u32 dma_tagstatus_R;
+	u32 pu_mb_R;
+	u32 spu_mb_W;
+	u32 mb_stat_R;
+	u32 spu_runcntl_RW;
+	u32 spu_status_R;
+	u32 spu_spc_R;
+	u32 spu_npc_RW;
+	u32 signal_notify1;
+	u32 signal_notify2;
+	u32 unused_pad1;
+};
+
+/*
+ * struct spu_priv1_collapsed - condensed privileged 1 area, w/o pads.
+ */
+struct spu_priv1_collapsed {
+	u64 mfc_sr1_RW;
+	u64 mfc_lpid_RW;
+	u64 spu_idr_RW;
+	u64 mfc_vr_RO;
+	u64 spu_vr_RO;
+	u64 int_mask_class0_RW;
+	u64 int_mask_class1_RW;
+	u64 int_mask_class2_RW;
+	u64 int_stat_class0_RW;
+	u64 int_stat_class1_RW;
+	u64 int_stat_class2_RW;
+	u64 int_route_RW;
+	u64 mfc_atomic_flush_RW;
+	u64 resource_allocation_groupID_RW;
+	u64 resource_allocation_enable_RW;
+	u64 mfc_fir_R;
+	u64 mfc_fir_status_or_W;
+	u64 mfc_fir_status_and_W;
+	u64 mfc_fir_mask_R;
+	u64 mfc_fir_mask_or_W;
+	u64 mfc_fir_mask_and_W;
+	u64 mfc_fir_chkstp_enable_RW;
+	u64 smf_sbi_signal_sel;
+	u64 smf_ato_signal_sel;
+	u64 tlb_index_hint_RO;
+	u64 tlb_index_W;
+	u64 tlb_vpn_RW;
+	u64 tlb_rpn_RW;
+	u64 tlb_invalidate_entry_W;
+	u64 tlb_invalidate_all_W;
+	u64 smm_hid;
+	u64 mfc_accr_RW;
+	u64 mfc_dsisr_RW;
+	u64 mfc_dar_RW;
+	u64 rmt_index_RW;
+	u64 rmt_data1_RW;
+	u64 mfc_dsir_R;
+	u64 mfc_lsacr_RW;
+	u64 mfc_lscrr_R;
+	u64 mfc_tclass_id_RW;
+	u64 mfc_rm_boundary;
+	u64 smf_dma_signal_sel;
+	u64 smm_signal_sel;
+	u64 mfc_cer_R;
+	u64 pu_ecc_cntl_RW;
+	u64 pu_ecc_stat_RW;
+	u64 spu_ecc_addr_RW;
+	u64 spu_err_mask_RW;
+	u64 spu_trig0_sel;
+	u64 spu_trig1_sel;
+	u64 spu_trig2_sel;
+	u64 spu_trig3_sel;
+	u64 spu_trace_sel;
+	u64 spu_event0_sel;
+	u64 spu_event1_sel;
+	u64 spu_event2_sel;
+	u64 spu_event3_sel;
+	u64 spu_trace_cntl;
+};
+
+/*
+ * struct spu_priv2_collapsed - condensed privileged 2 area, w/o pads.
+ */
+struct spu_priv2_collapsed {
+	u64 slb_index_W;
+	u64 slb_esid_RW;
+	u64 slb_vsid_RW;
+	u64 slb_invalidate_entry_W;
+	u64 slb_invalidate_all_W;
+	struct mfc_cq_sr spuq[16];
+	struct mfc_cq_sr puq[8];
+	u64 mfc_control_RW;
+	u64 puint_mb_R;
+	u64 spu_privcntl_RW;
+	u64 spu_lslr_RW;
+	u64 spu_chnlcntptr_RW;
+	u64 spu_chnlcnt_RW;
+	u64 spu_chnldata_RW;
+	u64 spu_cfg_RW;
+	u64 spu_tag_status_query_RW;
+	u64 spu_cmd_buf1_RW;
+	u64 spu_cmd_buf2_RW;
+	u64 spu_atomic_status_RW;
+};
+
+/**
+ * struct spu_state
+ * @lscsa: Local Store Context Save Area.
+ * @prob: Collapsed Problem State Area, w/o pads.
+ * @priv1: Collapsed Privileged 1 Area, w/o pads.
+ * @priv2: Collapsed Privileged 2 Area, w/o pads.
+ * @spu_chnlcnt_RW: Array of saved channel counts.
+ * @spu_chnldata_RW: Array of saved channel data.
+ * @suspend_time: Time stamp when decrementer disabled.
+ *
+ * Structure representing the whole of the SPU
+ * context save area (CSA).  This struct contains
+ * all of the state necessary to suspend and then
+ * later optionally resume execution of an SPU
+ * context.
+ *
+ * The @lscsa region is by far the largest, and is
+ * allocated separately so that it may either be
+ * pinned or mapped to/from application memory, as
+ * appropriate for the OS environment.
+ */
+struct spu_state {
+	struct spu_lscsa *lscsa;
+	struct spu_problem_collapsed prob;
+	struct spu_priv1_collapsed priv1;
+	struct spu_priv2_collapsed priv2;
+	u64 spu_chnlcnt_RW[32];
+	u64 spu_chnldata_RW[32];
+	u32 spu_mailbox_data[4];
+	u32 pu_mailbox_data[1];
+	u64 class_0_dar, class_0_pending;
+	u64 class_1_dar, class_1_dsisr;
+	unsigned long suspend_time;
+	spinlock_t register_lock;
+};
+
+#endif /* !__SPU__ */
+#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+#endif /* _SPU_CSA_H_ */
diff --git a/arch/powerpc/include/asm/spu_info.h b/arch/powerpc/include/asm/spu_info.h
new file mode 100644
index 0000000..7146b78
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_info.h
@@ -0,0 +1,28 @@
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _SPU_INFO_H
+#define _SPU_INFO_H
+
+#include <asm/spu.h>
+#include <uapi/asm/spu_info.h>
+
+#endif
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
new file mode 100644
index 0000000..d8f5c60
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -0,0 +1,236 @@
+/*
+ * Defines an spu hypervisor abstraction layer.
+ *
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_SPU_PRIV1_H)
+#define _SPU_PRIV1_H
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+
+struct spu;
+struct spu_context;
+
+/* access to priv1 registers */
+
+struct spu_priv1_ops {
+	void (*int_mask_and) (struct spu *spu, int class, u64 mask);
+	void (*int_mask_or) (struct spu *spu, int class, u64 mask);
+	void (*int_mask_set) (struct spu *spu, int class, u64 mask);
+	u64 (*int_mask_get) (struct spu *spu, int class);
+	void (*int_stat_clear) (struct spu *spu, int class, u64 stat);
+	u64 (*int_stat_get) (struct spu *spu, int class);
+	void (*cpu_affinity_set) (struct spu *spu, int cpu);
+	u64 (*mfc_dar_get) (struct spu *spu);
+	u64 (*mfc_dsisr_get) (struct spu *spu);
+	void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
+	void (*mfc_sdr_setup) (struct spu *spu);
+	void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
+	u64 (*mfc_sr1_get) (struct spu *spu);
+	void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
+	u64 (*mfc_tclass_id_get) (struct spu *spu);
+	void (*tlb_invalidate) (struct spu *spu);
+	void (*resource_allocation_groupID_set) (struct spu *spu, u64 id);
+	u64 (*resource_allocation_groupID_get) (struct spu *spu);
+	void (*resource_allocation_enable_set) (struct spu *spu, u64 enable);
+	u64 (*resource_allocation_enable_get) (struct spu *spu);
+};
+
+extern const struct spu_priv1_ops* spu_priv1_ops;
+
+static inline void
+spu_int_mask_and (struct spu *spu, int class, u64 mask)
+{
+	spu_priv1_ops->int_mask_and(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_or (struct spu *spu, int class, u64 mask)
+{
+	spu_priv1_ops->int_mask_or(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_set (struct spu *spu, int class, u64 mask)
+{
+	spu_priv1_ops->int_mask_set(spu, class, mask);
+}
+
+static inline u64
+spu_int_mask_get (struct spu *spu, int class)
+{
+	return spu_priv1_ops->int_mask_get(spu, class);
+}
+
+static inline void
+spu_int_stat_clear (struct spu *spu, int class, u64 stat)
+{
+	spu_priv1_ops->int_stat_clear(spu, class, stat);
+}
+
+static inline u64
+spu_int_stat_get (struct spu *spu, int class)
+{
+	return spu_priv1_ops->int_stat_get (spu, class);
+}
+
+static inline void
+spu_cpu_affinity_set (struct spu *spu, int cpu)
+{
+	spu_priv1_ops->cpu_affinity_set(spu, cpu);
+}
+
+static inline u64
+spu_mfc_dar_get (struct spu *spu)
+{
+	return spu_priv1_ops->mfc_dar_get(spu);
+}
+
+static inline u64
+spu_mfc_dsisr_get (struct spu *spu)
+{
+	return spu_priv1_ops->mfc_dsisr_get(spu);
+}
+
+static inline void
+spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
+{
+	spu_priv1_ops->mfc_dsisr_set(spu, dsisr);
+}
+
+static inline void
+spu_mfc_sdr_setup (struct spu *spu)
+{
+	spu_priv1_ops->mfc_sdr_setup(spu);
+}
+
+static inline void
+spu_mfc_sr1_set (struct spu *spu, u64 sr1)
+{
+	spu_priv1_ops->mfc_sr1_set(spu, sr1);
+}
+
+static inline u64
+spu_mfc_sr1_get (struct spu *spu)
+{
+	return spu_priv1_ops->mfc_sr1_get(spu);
+}
+
+static inline void
+spu_mfc_tclass_id_set (struct spu *spu, u64 tclass_id)
+{
+	spu_priv1_ops->mfc_tclass_id_set(spu, tclass_id);
+}
+
+static inline u64
+spu_mfc_tclass_id_get (struct spu *spu)
+{
+	return spu_priv1_ops->mfc_tclass_id_get(spu);
+}
+
+static inline void
+spu_tlb_invalidate (struct spu *spu)
+{
+	spu_priv1_ops->tlb_invalidate(spu);
+}
+
+static inline void
+spu_resource_allocation_groupID_set (struct spu *spu, u64 id)
+{
+	spu_priv1_ops->resource_allocation_groupID_set(spu, id);
+}
+
+static inline u64
+spu_resource_allocation_groupID_get (struct spu *spu)
+{
+	return spu_priv1_ops->resource_allocation_groupID_get(spu);
+}
+
+static inline void
+spu_resource_allocation_enable_set (struct spu *spu, u64 enable)
+{
+	spu_priv1_ops->resource_allocation_enable_set(spu, enable);
+}
+
+static inline u64
+spu_resource_allocation_enable_get (struct spu *spu)
+{
+	return spu_priv1_ops->resource_allocation_enable_get(spu);
+}
+
+/* spu management abstraction */
+
+struct spu_management_ops {
+	int (*enumerate_spus)(int (*fn)(void *data));
+	int (*create_spu)(struct spu *spu, void *data);
+	int (*destroy_spu)(struct spu *spu);
+	void (*enable_spu)(struct spu_context *ctx);
+	void (*disable_spu)(struct spu_context *ctx);
+	int (*init_affinity)(void);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+	return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+	return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+	return spu_management_ops->destroy_spu(spu);
+}
+
+static inline int
+spu_init_affinity (void)
+{
+	return spu_management_ops->init_affinity();
+}
+
+static inline void
+spu_enable_spu (struct spu_context *ctx)
+{
+	spu_management_ops->enable_spu(ctx);
+}
+
+static inline void
+spu_disable_spu (struct spu_context *ctx)
+{
+	spu_management_ops->disable_spu(ctx);
+}
+
+/*
+ * The declarations following are put here for convenience
+ * and only intended to be used by the platform setup code.
+ */
+
+extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_priv1_ops spu_priv1_beat_ops;
+
+extern const struct spu_management_ops spu_management_of_ops;
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
new file mode 100644
index 0000000..4547891
--- /dev/null
+++ b/arch/powerpc/include/asm/sstep.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct pt_regs;
+
+/*
+ * We don't allow single-stepping an mtmsrd that would clear
+ * MSR_RI, since that would make the exception unrecoverable.
+ * Since we need to single-step to proceed from a breakpoint,
+ * we don't allow putting a breakpoint on an mtmsrd instruction.
+ * Similarly we don't allow breakpoints on rfid instructions.
+ * These macros tell us if an instruction is an mtmsrd or an rfid.
+ * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
+ * and an mtmsrd (64-bit).
+ */
+#define IS_MTMSRD(instr)	(((instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr)		(((instr) & 0xfc0007fe) == 0x4c000024)
+#define IS_RFI(instr)		(((instr) & 0xfc0007fe) == 0x4c000064)
+
+enum instruction_type {
+	COMPUTE,		/* arith/logical/CR op, etc. */
+	LOAD,			/* load and store types need to be contiguous */
+	LOAD_MULTI,
+	LOAD_FP,
+	LOAD_VMX,
+	LOAD_VSX,
+	STORE,
+	STORE_MULTI,
+	STORE_FP,
+	STORE_VMX,
+	STORE_VSX,
+	LARX,
+	STCX,
+	BRANCH,
+	MFSPR,
+	MTSPR,
+	CACHEOP,
+	BARRIER,
+	SYSCALL,
+	MFMSR,
+	MTMSR,
+	RFI,
+	INTERRUPT,
+	UNKNOWN
+};
+
+#define INSTR_TYPE_MASK	0x1f
+
+#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
+
+/* Compute flags, ORed in with type */
+#define SETREG		0x20
+#define SETCC		0x40
+#define SETXER		0x80
+
+/* Branch flags, ORed in with type */
+#define SETLK		0x20
+#define BRTAKEN		0x40
+#define DECCTR		0x80
+
+/* Load/store flags, ORed in with type */
+#define SIGNEXT		0x20
+#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
+#define BYTEREV		0x80
+#define FPCONV		0x100
+
+/* Barrier type field, ORed in with type */
+#define BARRIER_MASK	0xe0
+#define BARRIER_SYNC	0x00
+#define BARRIER_ISYNC	0x20
+#define BARRIER_EIEIO	0x40
+#define BARRIER_LWSYNC	0x60
+#define BARRIER_PTESYNC	0x80
+
+/* Cacheop values, ORed in with type */
+#define CACHEOP_MASK	0x700
+#define DCBST		0
+#define DCBF		0x100
+#define DCBTST		0x200
+#define DCBT		0x300
+#define ICBI		0x400
+#define DCBZ		0x500
+
+/* VSX flags values */
+#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
+#define VSX_SPLAT	2	/* store loaded value into all elements */
+#define VSX_LDLEFT	4	/* load VSX register from left */
+#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */
+
+/* Size field in type word */
+#define SIZE(n)		((n) << 12)
+#define GETSIZE(w)	((w) >> 12)
+
+#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
+
+#define MKOP(t, f, s)	((t) | (f) | SIZE(s))
+
+struct instruction_op {
+	int type;
+	int reg;
+	unsigned long val;
+	/* For LOAD/STORE/LARX/STCX */
+	unsigned long ea;
+	int update_reg;
+	/* For MFSPR */
+	int spr;
+	u32 ccval;
+	u32 xerval;
+	u8 element_size;	/* for VSX/VMX loads/stores */
+	u8 vsx_flags;
+};
+
+union vsx_reg {
+	u8	b[16];
+	u16	h[8];
+	u32	w[4];
+	unsigned long d[2];
+	float	fp[4];
+	double	dp[2];
+	__vector128 v;
+};
+
+/*
+ * Decode an instruction, and return information about it in *op
+ * without changing *regs.
+ *
+ * Return value is 1 if the instruction can be emulated just by
+ * updating *regs with the information in *op, -1 if we need the
+ * GPRs but *regs doesn't contain the full register set, or 0
+ * otherwise.
+ */
+extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+			 unsigned int instr);
+
+/*
+ * Emulate an instruction that can be executed just by updating
+ * fields in *regs.
+ */
+void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * arithmetic/logical instructions, loads and stores,
+ * cache operations and barriers.
+ *
+ * Returns 1 if the instruction was emulated successfully,
+ * 0 if it could not be emulated, or -1 for an instruction that
+ * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
+ */
+extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+
+/*
+ * Emulate a load or store instruction by reading/writing the
+ * memory of the current process.  FP/VMX/VSX registers are assumed
+ * to hold live values if the appropriate enable bit in regs->msr is
+ * set; otherwise this will use the saved values in the thread struct
+ * for user-mode accesses.
+ */
+extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+
+extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+			     const void *mem, bool cross_endian);
+extern void emulate_vsx_store(struct instruction_op *op,
+			      const union vsx_reg *reg, void *mem,
+			      bool cross_endian);
+extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
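
Because the base type, flag bits, and access size share one word, consumers unpack them with GETTYPE()/GETSIZE(). A hedged sketch of a decode-only caller; it assumes analyse_instr() fills *op even when its return value says the instruction cannot be finished by updating *regs alone:

/* Sketch: classify an instruction without executing it. */
static bool is_byterev_load(const struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;

	if (analyse_instr(&op, regs, instr) < 0)
		return false;	/* full GPR set unavailable */

	return GETTYPE(op.type) == LOAD &&	/* base type bits */
	       (op.type & BYTEREV) &&		/* flag bits */
	       GETSIZE(op.type) == 4;		/* lwbrx-sized access */
}
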
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
new file mode 100644
index 0000000..6149b53
--- /dev/null
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Stack trace functions.
+ *
+ * Copyright 2018, Murilo Opsfelder Araujo, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_STACKTRACE_H
+#define _ASM_POWERPC_STACKTRACE_H
+
+void show_user_instructions(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
new file mode 100644
index 0000000..1647de1
--- /dev/null
+++ b/arch/powerpc/include/asm/string.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STRING_H
+#define _ASM_POWERPC_STRING_H
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRNCMP
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMCHR
+#define __HAVE_ARCH_MEMSET16
+#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
+
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *, const char *, __kernel_size_t);
+extern char * strcat(char *, const char *);
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern int memcmp(const void *,const void *,__kernel_size_t);
+extern void * memchr(const void *,int,__kernel_size_t);
+extern void * memcpy_flushcache(void *,const void *,__kernel_size_t);
+
+#ifdef CONFIG_PPC64
+#define __HAVE_ARCH_MEMSET32
+#define __HAVE_ARCH_MEMSET64
+
+extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
+
+static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
+{
+	return __memset16(p, v, n * 2);
+}
+
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+	return __memset32(p, v, n * 4);
+}
+
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+	return __memset64(p, v, n * 8);
+}
+#else
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
+#endif
+#endif /* __KERNEL__ */
+
+#endif	/* _ASM_POWERPC_STRING_H */
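
Note the wrappers take a count of elements, not bytes, and scale internally. A small usage sketch (the scanline-fill framing is illustrative, and memset32() is only provided on 64-bit configs here):

/* Sketch: fill `width` 32-bit pixels; the helper writes width * 4 bytes. */
static void fill_scanline(u32 *line, u32 pixel, __kernel_size_t width)
{
	memset32(line, pixel, width);
}
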
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
new file mode 100644
index 0000000..487e090
--- /dev/null
+++ b/arch/powerpc/include/asm/swab.h
@@ -0,0 +1,12 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_SWAB_H
+#define _ASM_POWERPC_SWAB_H
+
+#include <uapi/asm/swab.h>
+
+#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
new file mode 100644
index 0000000..f65ecf5
--- /dev/null
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __ASM_SWIOTLB_H
+#define __ASM_SWIOTLB_H
+
+#include <linux/swiotlb.h>
+
+extern const struct dma_map_ops powerpc_swiotlb_dma_ops;
+
+extern unsigned int ppc_swiotlb_enable;
+int __init swiotlb_setup_bus_notifier(void);
+
+extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
+
+#ifdef CONFIG_SWIOTLB
+void swiotlb_detect_4g(void);
+#else
+static inline void swiotlb_detect_4g(void) {}
+#endif
+
+#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
new file mode 100644
index 0000000..5b03d8a
--- /dev/null
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SWITCH_TO_H
+#define _ASM_POWERPC_SWITCH_TO_H
+
+#include <asm/reg.h>
+
+struct thread_struct;
+struct task_struct;
+struct pt_regs;
+
+extern struct task_struct *__switch_to(struct task_struct *,
+	struct task_struct *);
+#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
+
+extern struct task_struct *_switch(struct thread_struct *prev,
+				   struct thread_struct *next);
+
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
+
+extern int emulate_altivec(struct pt_regs *);
+
+extern void flush_all_to_thread(struct task_struct *);
+extern void giveup_all(struct task_struct *);
+
+#ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+extern void save_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+	msr_check_and_clear(MSR_FP);
+}
+#else
+static inline void save_fpu(struct task_struct *t) { }
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
+extern void flush_altivec_to_thread(struct task_struct *);
+extern void giveup_altivec(struct task_struct *);
+extern void save_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
+{
+	msr_check_and_clear(MSR_VEC);
+}
+#else
+static inline void save_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
+#endif
+
+#ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
+extern void flush_vsx_to_thread(struct task_struct *);
+static inline void disable_kernel_vsx(void)
+{
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
+extern void flush_spe_to_thread(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
+{
+	msr_check_and_clear(MSR_SPE);
+}
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
+#endif
+
+static inline void clear_task_ebb(struct task_struct *t)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* EBB perf events are not inherited, so clear all EBB state. */
+	t->thread.ebbrr = 0;
+	t->thread.ebbhr = 0;
+	t->thread.bescr = 0;
+	t->thread.mmcr2 = 0;
+	t->thread.mmcr0 = 0;
+	t->thread.siar = 0;
+	t->thread.sdar = 0;
+	t->thread.sier = 0;
+	t->thread.used_ebb = 0;
+#endif
+}
+
+extern int set_thread_uses_vas(void);
+
+extern int set_thread_tidr(struct task_struct *t);
+
+#endif /* _ASM_POWERPC_SWITCH_TO_H */
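
Kernel code that touches FP/VMX/VSX registers brackets the region with the enable/disable pairs above. A hedged sketch, assuming preemption is disabled across the region since the register state is per-CPU while in use:

/* Sketch: a kernel-mode FP region. */
static void example_fp_region(void)
{
	preempt_disable();
	enable_kernel_fp();
	/* ... FP instructions may be used here ... */
	disable_kernel_fp();
	preempt_enable();
}
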
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
new file mode 100644
index 0000000..aca70fb
--- /dev/null
+++ b/arch/powerpc/include/asm/synch.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+#ifdef __KERNEL__
+
+#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
+
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+			     void *fixup_end);
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
+
+#if defined(__powerpc64__)
+#    define LWSYNC	lwsync
+#elif defined(CONFIG_E500)
+#    define LWSYNC					\
+	START_LWSYNC_SECTION(96);			\
+	sync;						\
+	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
+#else
+#    define LWSYNC	sync
+#endif
+
+#ifdef CONFIG_SMP
+#define __PPC_ACQUIRE_BARRIER				\
+	START_LWSYNC_SECTION(97);			\
+	isync;						\
+	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+#define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
+#else
+#define PPC_ACQUIRE_BARRIER
+#define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
+#endif
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_SYNCH_H */
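
The stringified barrier macros are meant to be spliced directly into inline assembly. A sketch modelled on the spinlock primitives, where PPC_ACQUIRE_BARRIER sits on the success path of a larx/stcx. loop:

/* Sketch: acquire semantics on a successful compare-and-set. */
static inline int example_trylock(unsigned int *lock)
{
	unsigned int old;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2f\n\
	stwcx.	%2,0,%1\n\
	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (old)
	: "r" (lock), "r" (1)
	: "cr0", "memory");

	return old == 0;
}
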
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
new file mode 100644
index 0000000..ab9f3f0
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall.h
@@ -0,0 +1,111 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+/* ftrace syscalls requires exporting the sys_call_table */
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern const unsigned long sys_call_table[];
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	/*
+	 * Note that we are returning an int here. That means 0xffffffff, ie.
+	 * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+	 * This is important for seccomp so that compat tasks can set r0 = -1
+	 * to reject the syscall.
+	 */
+	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->gpr[3] = regs->orig_gpr3;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->gpr[3];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	/*
+	 * In the general case it's not obvious that we must deal with CCR
+	 * here, as the syscall exit path will also do that for us. However
+	 * there are some places, eg. the signal code, which check ccr to
+	 * decide if the value in r3 is actually an error.
+	 */
+	if (error) {
+		regs->ccr |= 0x10000000L;
+		regs->gpr[3] = error;
+	} else {
+		regs->ccr &= ~0x10000000L;
+		regs->gpr[3] = val;
+	}
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	unsigned long val, mask = -1UL;
+
+	BUG_ON(i + n > 6);
+
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(task, TIF_32BIT))
+		mask = 0xffffffff;
+#endif
+	while (n--) {
+		if (n == 0 && i == 0)
+			val = regs->orig_gpr3;
+		else
+			val = regs->gpr[3 + i + n];
+
+		args[n] = val & mask;
+	}
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+
+	/* Also copy the first argument into orig_gpr3 */
+	if (i == 0 && n > 0)
+		regs->orig_gpr3 = args[0];
+}
+
+static inline int syscall_get_arch(void)
+{
+	int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+#ifdef __LITTLE_ENDIAN__
+	arch |= __AUDIT_ARCH_LE;
+#endif
+	return arch;
+}
+#endif	/* _ASM_SYSCALL_H */
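
A hedged sketch of a consumer (e.g. a tracing path) pulling the argument block with the accessors above; note how argument 0 is sourced from orig_gpr3 so the value survives the syscall clobbering r3:

/* Sketch: dump the first two syscall arguments of a traced task. */
static void example_dump_syscall(struct task_struct *task,
				 struct pt_regs *regs)
{
	unsigned long args[6];

	syscall_get_arguments(task, regs, 0, 6, args);
	pr_info("syscall %d: arg0=%lx arg1=%lx\n",
		syscall_get_nr(task, regs), args[0], args[1]);
}
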
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
new file mode 100644
index 0000000..398171f
--- /dev/null
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_SYSCALLS_H
+#define __ASM_POWERPC_SYSCALLS_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct rtas_args;
+
+asmlinkage long sys_mmap(unsigned long addr, size_t len,
+		unsigned long prot, unsigned long flags,
+		unsigned long fd, off_t offset);
+asmlinkage long sys_mmap2(unsigned long addr, size_t len,
+		unsigned long prot, unsigned long flags,
+		unsigned long fd, unsigned long pgoff);
+asmlinkage long ppc64_personality(unsigned long personality);
+asmlinkage long sys_rtas(struct rtas_args __user *uargs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
new file mode 100644
index 0000000..01b5171
--- /dev/null
+++ b/arch/powerpc/include/asm/systbl.h
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * List of powerpc syscalls. For the meaning of the _SPU suffix see
+ * arch/powerpc/platforms/cell/spu_callbacks.c
+ */
+
+SYSCALL(restart_syscall)
+SYSCALL(exit)
+PPC_SYS(fork)
+SYSCALL_SPU(read)
+SYSCALL_SPU(write)
+COMPAT_SYS_SPU(open)
+SYSCALL_SPU(close)
+SYSCALL_SPU(waitpid)
+SYSCALL_SPU(creat)
+SYSCALL_SPU(link)
+SYSCALL_SPU(unlink)
+COMPAT_SYS(execve)
+SYSCALL_SPU(chdir)
+COMPAT_SYS_SPU(time)
+SYSCALL_SPU(mknod)
+SYSCALL_SPU(chmod)
+SYSCALL_SPU(lchown)
+SYSCALL(ni_syscall)
+OLDSYS(stat)
+COMPAT_SYS_SPU(lseek)
+SYSCALL_SPU(getpid)
+COMPAT_SYS(mount)
+SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
+SYSCALL_SPU(setuid)
+SYSCALL_SPU(getuid)
+COMPAT_SYS_SPU(stime)
+COMPAT_SYS(ptrace)
+SYSCALL_SPU(alarm)
+OLDSYS(fstat)
+SYSCALL(pause)
+COMPAT_SYS(utime)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(access)
+SYSCALL_SPU(nice)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(sync)
+SYSCALL_SPU(kill)
+SYSCALL_SPU(rename)
+SYSCALL_SPU(mkdir)
+SYSCALL_SPU(rmdir)
+SYSCALL_SPU(dup)
+SYSCALL_SPU(pipe)
+COMPAT_SYS_SPU(times)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(brk)
+SYSCALL_SPU(setgid)
+SYSCALL_SPU(getgid)
+SYSCALL(signal)
+SYSCALL_SPU(geteuid)
+SYSCALL_SPU(getegid)
+SYSCALL(acct)
+SYSCALL(umount)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(ioctl)
+COMPAT_SYS_SPU(fcntl)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(setpgid)
+SYSCALL(ni_syscall)
+SYSX(sys_ni_syscall,sys_olduname,sys_olduname)
+SYSCALL_SPU(umask)
+SYSCALL_SPU(chroot)
+COMPAT_SYS(ustat)
+SYSCALL_SPU(dup2)
+SYSCALL_SPU(getppid)
+SYSCALL_SPU(getpgrp)
+SYSCALL_SPU(setsid)
+SYS32ONLY(sigaction)
+SYSCALL_SPU(sgetmask)
+SYSCALL_SPU(ssetmask)
+SYSCALL_SPU(setreuid)
+SYSCALL_SPU(setregid)
+#define compat_sys_sigsuspend sys_sigsuspend
+SYS32ONLY(sigsuspend)
+SYSX(sys_ni_syscall,compat_sys_sigpending,sys_sigpending)
+SYSCALL_SPU(sethostname)
+COMPAT_SYS_SPU(setrlimit)
+SYSX(sys_ni_syscall,compat_sys_old_getrlimit,sys_old_getrlimit)
+COMPAT_SYS_SPU(getrusage)
+COMPAT_SYS_SPU(gettimeofday)
+COMPAT_SYS_SPU(settimeofday)
+SYSCALL_SPU(getgroups)
+SYSCALL_SPU(setgroups)
+SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
+SYSCALL_SPU(symlink)
+OLDSYS(lstat)
+SYSCALL_SPU(readlink)
+SYSCALL(uselib)
+SYSCALL(swapon)
+SYSCALL(reboot)
+SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
+SYSCALL_SPU(mmap)
+SYSCALL_SPU(munmap)
+COMPAT_SYS_SPU(truncate)
+COMPAT_SYS_SPU(ftruncate)
+SYSCALL_SPU(fchmod)
+SYSCALL_SPU(fchown)
+SYSCALL_SPU(getpriority)
+SYSCALL_SPU(setpriority)
+SYSCALL(ni_syscall)
+COMPAT_SYS(statfs)
+COMPAT_SYS(fstatfs)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(socketcall)
+SYSCALL_SPU(syslog)
+COMPAT_SYS_SPU(setitimer)
+COMPAT_SYS_SPU(getitimer)
+COMPAT_SYS_SPU(newstat)
+COMPAT_SYS_SPU(newlstat)
+COMPAT_SYS_SPU(newfstat)
+SYSX(sys_ni_syscall,sys_uname,sys_uname)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(vhangup)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(wait4)
+SYSCALL(swapoff)
+COMPAT_SYS_SPU(sysinfo)
+COMPAT_SYS(ipc)
+SYSCALL_SPU(fsync)
+SYS32ONLY(sigreturn)
+PPC_SYS(clone)
+SYSCALL_SPU(setdomainname)
+SYSCALL_SPU(newuname)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(adjtimex)
+SYSCALL_SPU(mprotect)
+SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
+SYSCALL(ni_syscall)
+SYSCALL(init_module)
+SYSCALL(delete_module)
+SYSCALL(ni_syscall)
+SYSCALL(quotactl)
+SYSCALL_SPU(getpgid)
+SYSCALL_SPU(fchdir)
+SYSCALL_SPU(bdflush)
+SYSCALL_SPU(sysfs)
+SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(setfsuid)
+SYSCALL_SPU(setfsgid)
+SYSCALL_SPU(llseek)
+COMPAT_SYS_SPU(getdents)
+COMPAT_SPU_NEW(select)
+SYSCALL_SPU(flock)
+SYSCALL_SPU(msync)
+COMPAT_SYS_SPU(readv)
+COMPAT_SYS_SPU(writev)
+SYSCALL_SPU(getsid)
+SYSCALL_SPU(fdatasync)
+COMPAT_SYS(sysctl)
+SYSCALL_SPU(mlock)
+SYSCALL_SPU(munlock)
+SYSCALL_SPU(mlockall)
+SYSCALL_SPU(munlockall)
+SYSCALL_SPU(sched_setparam)
+SYSCALL_SPU(sched_getparam)
+SYSCALL_SPU(sched_setscheduler)
+SYSCALL_SPU(sched_getscheduler)
+SYSCALL_SPU(sched_yield)
+SYSCALL_SPU(sched_get_priority_max)
+SYSCALL_SPU(sched_get_priority_min)
+COMPAT_SYS_SPU(sched_rr_get_interval)
+COMPAT_SYS_SPU(nanosleep)
+SYSCALL_SPU(mremap)
+SYSCALL_SPU(setresuid)
+SYSCALL_SPU(getresuid)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(poll)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(setresgid)
+SYSCALL_SPU(getresgid)
+SYSCALL_SPU(prctl)
+COMPAT_SYS(rt_sigreturn)
+COMPAT_SYS(rt_sigaction)
+COMPAT_SYS(rt_sigprocmask)
+COMPAT_SYS(rt_sigpending)
+COMPAT_SYS(rt_sigtimedwait)
+COMPAT_SYS(rt_sigqueueinfo)
+COMPAT_SYS(rt_sigsuspend)
+COMPAT_SYS_SPU(pread64)
+COMPAT_SYS_SPU(pwrite64)
+SYSCALL_SPU(chown)
+SYSCALL_SPU(getcwd)
+SYSCALL_SPU(capget)
+SYSCALL_SPU(capset)
+COMPAT_SYS(sigaltstack)
+SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+PPC_SYS(vfork)
+COMPAT_SYS_SPU(getrlimit)
+COMPAT_SYS_SPU(readahead)
+SYS32ONLY(mmap2)
+SYS32ONLY(truncate64)
+SYS32ONLY(ftruncate64)
+SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
+SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
+SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
+SYSCALL(pciconfig_read)
+SYSCALL(pciconfig_write)
+SYSCALL(pciconfig_iobase)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(getdents64)
+SYSCALL_SPU(pivot_root)
+SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
+SYSCALL_SPU(madvise)
+SYSCALL_SPU(mincore)
+SYSCALL_SPU(gettid)
+SYSCALL_SPU(tkill)
+SYSCALL_SPU(setxattr)
+SYSCALL_SPU(lsetxattr)
+SYSCALL_SPU(fsetxattr)
+SYSCALL_SPU(getxattr)
+SYSCALL_SPU(lgetxattr)
+SYSCALL_SPU(fgetxattr)
+SYSCALL_SPU(listxattr)
+SYSCALL_SPU(llistxattr)
+SYSCALL_SPU(flistxattr)
+SYSCALL_SPU(removexattr)
+SYSCALL_SPU(lremovexattr)
+SYSCALL_SPU(fremovexattr)
+COMPAT_SYS_SPU(futex)
+COMPAT_SYS_SPU(sched_setaffinity)
+COMPAT_SYS_SPU(sched_getaffinity)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYS32ONLY(sendfile64)
+COMPAT_SYS_SPU(io_setup)
+SYSCALL_SPU(io_destroy)
+COMPAT_SYS_SPU(io_getevents)
+COMPAT_SYS_SPU(io_submit)
+SYSCALL_SPU(io_cancel)
+SYSCALL(set_tid_address)
+SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
+SYSCALL(exit_group)
+COMPAT_SYS(lookup_dcookie)
+SYSCALL_SPU(epoll_create)
+SYSCALL_SPU(epoll_ctl)
+SYSCALL_SPU(epoll_wait)
+SYSCALL_SPU(remap_file_pages)
+COMPAT_SYS_SPU(timer_create)
+COMPAT_SYS_SPU(timer_settime)
+COMPAT_SYS_SPU(timer_gettime)
+SYSCALL_SPU(timer_getoverrun)
+SYSCALL_SPU(timer_delete)
+COMPAT_SYS_SPU(clock_settime)
+COMPAT_SYS_SPU(clock_gettime)
+COMPAT_SYS_SPU(clock_getres)
+COMPAT_SYS_SPU(clock_nanosleep)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+SYSCALL_SPU(tgkill)
+COMPAT_SYS_SPU(utimes)
+COMPAT_SYS_SPU(statfs64)
+COMPAT_SYS_SPU(fstatfs64)
+SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
+SYSCALL_SPU(rtas)
+OLDSYS(debug_setcontext)
+SYSCALL(ni_syscall)
+COMPAT_SYS(migrate_pages)
+COMPAT_SYS(mbind)
+COMPAT_SYS(get_mempolicy)
+COMPAT_SYS(set_mempolicy)
+COMPAT_SYS(mq_open)
+SYSCALL(mq_unlink)
+COMPAT_SYS(mq_timedsend)
+COMPAT_SYS(mq_timedreceive)
+COMPAT_SYS(mq_notify)
+COMPAT_SYS(mq_getsetattr)
+COMPAT_SYS(kexec_load)
+SYSCALL(add_key)
+SYSCALL(request_key)
+COMPAT_SYS(keyctl)
+COMPAT_SYS(waitid)
+SYSCALL(ioprio_set)
+SYSCALL(ioprio_get)
+SYSCALL(inotify_init)
+SYSCALL(inotify_add_watch)
+SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
+COMPAT_SYS(pselect6)
+COMPAT_SYS(ppoll)
+SYSCALL_SPU(unshare)
+SYSCALL_SPU(splice)
+SYSCALL_SPU(tee)
+COMPAT_SYS_SPU(vmsplice)
+COMPAT_SYS_SPU(openat)
+SYSCALL_SPU(mkdirat)
+SYSCALL_SPU(mknodat)
+SYSCALL_SPU(fchownat)
+COMPAT_SYS_SPU(futimesat)
+SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)
+SYSCALL_SPU(unlinkat)
+SYSCALL_SPU(renameat)
+SYSCALL_SPU(linkat)
+SYSCALL_SPU(symlinkat)
+SYSCALL_SPU(readlinkat)
+SYSCALL_SPU(fchmodat)
+SYSCALL_SPU(faccessat)
+COMPAT_SYS_SPU(get_robust_list)
+COMPAT_SYS_SPU(set_robust_list)
+COMPAT_SYS_SPU(move_pages)
+SYSCALL_SPU(getcpu)
+COMPAT_SYS(epoll_pwait)
+COMPAT_SYS_SPU(utimensat)
+COMPAT_SYS_SPU(signalfd)
+SYSCALL_SPU(timerfd_create)
+SYSCALL_SPU(eventfd)
+COMPAT_SYS_SPU(sync_file_range2)
+COMPAT_SYS(fallocate)
+SYSCALL(subpage_prot)
+COMPAT_SYS_SPU(timerfd_settime)
+COMPAT_SYS_SPU(timerfd_gettime)
+COMPAT_SYS_SPU(signalfd4)
+SYSCALL_SPU(eventfd2)
+SYSCALL_SPU(epoll_create1)
+SYSCALL_SPU(dup3)
+SYSCALL_SPU(pipe2)
+SYSCALL(inotify_init1)
+SYSCALL_SPU(perf_event_open)
+COMPAT_SYS_SPU(preadv)
+COMPAT_SYS_SPU(pwritev)
+COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
+SYSCALL_SPU(socket)
+SYSCALL_SPU(bind)
+SYSCALL_SPU(connect)
+SYSCALL_SPU(listen)
+SYSCALL_SPU(accept)
+SYSCALL_SPU(getsockname)
+SYSCALL_SPU(getpeername)
+SYSCALL_SPU(socketpair)
+SYSCALL_SPU(send)
+SYSCALL_SPU(sendto)
+COMPAT_SYS_SPU(recv)
+COMPAT_SYS_SPU(recvfrom)
+SYSCALL_SPU(shutdown)
+COMPAT_SYS_SPU(setsockopt)
+COMPAT_SYS_SPU(getsockopt)
+COMPAT_SYS_SPU(sendmsg)
+COMPAT_SYS_SPU(recvmsg)
+COMPAT_SYS_SPU(recvmmsg)
+SYSCALL_SPU(accept4)
+SYSCALL_SPU(name_to_handle_at)
+COMPAT_SYS_SPU(open_by_handle_at)
+COMPAT_SYS_SPU(clock_adjtime)
+SYSCALL_SPU(syncfs)
+COMPAT_SYS_SPU(sendmmsg)
+SYSCALL_SPU(setns)
+COMPAT_SYS(process_vm_readv)
+COMPAT_SYS(process_vm_writev)
+SYSCALL(finit_module)
+SYSCALL(kcmp) /* sys_kcmp */
+SYSCALL_SPU(sched_setattr)
+SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
+SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
+PPC64ONLY(switch_endian)
+SYSCALL_SPU(userfaultfd)
+SYSCALL_SPU(membarrier)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(mlock2)
+SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
+SYSCALL(kexec_file_load)
+SYSCALL(statx)
+SYSCALL(pkey_alloc)
+SYSCALL(pkey_free)
+SYSCALL(pkey_mprotect)
+SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
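
This list is an X-macro table: each consumer defines the entry macros to expand to whatever it needs, then includes the header (the real consumers build the syscall table in arch/powerpc/kernel). A hedged sketch generating a name table, where the syscall number is the array index:

/* Sketch: expand every entry to its primary (64-bit) name as a string. */
static const char *syscall_name[] = {
#define SYSCALL(name)			#name,
#define SYSCALL_SPU(name)		#name,
#define COMPAT_SYS(name)		#name,
#define COMPAT_SYS_SPU(name)		#name,
#define COMPAT_SPU_NEW(name)		#name,
#define PPC_SYS(name)			#name,
#define OLDSYS(name)			#name,
#define SYS32ONLY(name)			#name,
#define PPC64ONLY(name)			#name,
#define SYSX(f, f3264, f32)		#f,
#define SYSX_SPU(f, f3264, f32)		#f,
#include <asm/systbl.h>
};
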
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
new file mode 100644
index 0000000..12e3629
--- /dev/null
+++ b/arch/powerpc/include/asm/tce.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_TCE_H
+#define _ASM_POWERPC_TCE_H
+#ifdef __KERNEL__
+
+#include <asm/iommu.h>
+
+/*
+ * TCEs come in two formats, one for the virtual bus and a different
+ * format for PCI.  PCI TCEs can have hardware- or software-maintained
+ * coherency.
+ */
+#define TCE_VB			0
+#define TCE_PCI			1
+
+/* TCE page size is 4096 bytes (1 << 12) */
+
+#define TCE_SHIFT	12
+#define TCE_PAGE_SIZE	(1 << TCE_SHIFT)
+
+#define TCE_ENTRY_SIZE		8		/* each TCE is 64 bits */
+
+#define TCE_RPN_MASK		0xfffffffffful  /* 40-bit RPN (4K pages) */
+#define TCE_RPN_SHIFT		12
+#define TCE_VALID		0x800		/* TCE valid */
+#define TCE_ALLIO		0x400		/* TCE valid for all lpars */
+#define TCE_PCI_WRITE		0x2		/* write from PCI allowed */
+#define TCE_PCI_READ		0x1		/* read from PCI allowed */
+#define TCE_VB_WRITE		0x1		/* write from VB allowed */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TCE_H */
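
A hedged sketch of composing a 64-bit table entry from a physical address with the fields above, mirroring the shape of the pSeries TCE-build path:

/* Sketch: RPN goes in the upper bits, permission flags in the low bits. */
static inline u64 example_build_tce(unsigned long paddr, bool allow_write)
{
	u64 tce = ((paddr >> TCE_SHIFT) & TCE_RPN_MASK) << TCE_RPN_SHIFT;

	tce |= TCE_PCI_READ;
	if (allow_write)
		tce |= TCE_PCI_WRITE;
	return tce;
}
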
diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h
new file mode 100644
index 0000000..b8353e2
--- /dev/null
+++ b/arch/powerpc/include/asm/termios.h
@@ -0,0 +1,22 @@
+/*
+ * Liberally adapted from alpha/termios.h.  In particular, the c_cc[]
+ * fields have been reordered so that termio & termios share the
+ * common subset in the same order (for brain dead programs that don't
+ * know or care about the differences).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_TERMIOS_H
+#define _ASM_POWERPC_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+/*                   ^C  ^\ del  ^U  ^D   1   0   0   0   0  ^W  ^R  ^Z  ^Q  ^S  ^V  ^U  */
+#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
+
+#include <asm-generic/termios-base.h>
+
+#endif	/* _ASM_POWERPC_TERMIOS_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
new file mode 100644
index 0000000..3c00020
--- /dev/null
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* thread_info.h: PowerPC low-level thread information
+ * adapted from the i386 version by Paul Mackerras
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_POWERPC_THREAD_INFO_H
+#define _ASM_POWERPC_THREAD_INFO_H
+
+#include <asm/asm-const.h>
+
+#ifdef __KERNEL__
+
+#define THREAD_SHIFT		CONFIG_THREAD_SHIFT
+
+#define THREAD_SIZE		(1 << THREAD_SHIFT)
+
+#ifdef CONFIG_PPC64
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
+#else
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/accounting.h>
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+	struct task_struct *task;		/* main task structure */
+	int		cpu;			/* cpu we're on */
+	int		preempt_count;		/* 0 => preemptable,
+						   <0 => BUG */
+	unsigned long	local_flags;		/* private flags for thread */
+#ifdef CONFIG_LIVEPATCH
+	unsigned long *livepatch_sp;
+#endif
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
+	struct cpu_accounting_data accounting;
+#endif
+	/* low level flags - has atomic operations done on it */
+	unsigned long	flags ____cacheline_aligned_in_smp;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task =		&tsk,			\
+	.cpu =		0,			\
+	.preempt_count = INIT_PREEMPT_COUNT,	\
+	.flags =	0,			\
+}
+
+#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+	unsigned long val;
+
+	asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+	return (struct thread_info *)val;
+}
+
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+#endif /* __ASSEMBLY__ */
+
+/*
+ * thread information flag bit numbers
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_SIGPENDING		1	/* signal pending */
+#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_FSCHECK		3	/* Check FS is USER_DS on return */
+#define TIF_32BIT		4	/* 32 bit binary */
+#define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
+#define TIF_PATCH_PENDING	6	/* pending live patching update */
+#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+#define TIF_SINGLESTEP		8	/* singlestepping active */
+#define TIF_NOHZ		9	/* in adaptive nohz mode */
+#define TIF_SECCOMP		10	/* secure computing */
+#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR		12	/* Force successful syscall return */
+#define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
+#define TIF_UPROBE		14	/* breakpointed or single-stepping */
+#define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
+#define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
+						for stack store? */
+#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+#if defined(CONFIG_PPC64)
+#define TIF_ELF2ABI		18	/* function descriptors must die! */
+#endif
+#define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+
+/* as above, but as bit values */
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_32BIT		(1<<TIF_32BIT)
+#define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)
+#define _TIF_PATCH_PENDING	(1<<TIF_PATCH_PENDING)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
+#define _TIF_NOERROR		(1<<TIF_NOERROR)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1<<TIF_UPROBE)
+#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ		(1<<TIF_NOHZ)
+#define _TIF_FSCHECK		(1<<TIF_FSCHECK)
+#define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+				 _TIF_NOHZ)
+
+#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+				 _TIF_FSCHECK)
+#define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
+
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING		0	/* idle thread enabled NAP mode */
+#define TLF_SLEEPING		1	/* suspend code enabled SLEEP mode */
+#define TLF_LAZY_MMU		3	/* tlb_batch is active */
+#define TLF_RUNLATCH		4	/* Is the runlatch enabled? */
+
+#define _TLF_NAPPING		(1 << TLF_NAPPING)
+#define _TLF_SLEEPING		(1 << TLF_SLEEPING)
+#define _TLF_LAZY_MMU		(1 << TLF_LAZY_MMU)
+#define _TLF_RUNLATCH		(1 << TLF_RUNLATCH)
+
+#ifndef __ASSEMBLY__
+
+static inline bool test_thread_local_flags(unsigned int flags)
+{
+	struct thread_info *ti = current_thread_info();
+	return (ti->local_flags & flags) != 0;
+}
+
+#ifdef CONFIG_PPC64
+#define is_32bit_task()	(test_thread_flag(TIF_32BIT))
+#else
+#define is_32bit_task()	(1)
+#endif
+
+#if defined(CONFIG_PPC64)
+#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
+#else
+#define is_elf2_task() (0)
+#endif
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_THREAD_INFO_H */
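
The TIF_* bit numbers above are consumed by the generic bit helpers from <linux/thread_info.h>; a small sketch:

/* Sketch: query a flag on the current thread. */
static bool example_has_pending_signal(void)
{
	return test_thread_flag(TIF_SIGPENDING);
}
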
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
new file mode 100644
index 0000000..b80d492
--- /dev/null
+++ b/arch/powerpc/include/asm/time.h
@@ -0,0 +1,205 @@
+/*
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_TIME_H
+#define __POWERPC_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
+
+/* time.c */
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
+
+extern void generic_calibrate_decr(void);
+extern void hdec_interrupt(struct pt_regs *regs);
+
+/* Some sane defaults: 125 MHz timebase, 1GHz processor */
+extern unsigned long ppc_proc_freq;
+#define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
+extern unsigned long ppc_tb_freq;
+#define DEFAULT_TB_FREQ		125000000UL
+
+struct div_result {
+	u64 result_high;
+	u64 result_low;
+};
+
+/* Accessor functions for the timebase (RTC on 601) registers. */
+/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
+#ifdef CONFIG_6xx
+#define __USE_RTC()	(cpu_has_feature(CPU_FTR_USE_RTC))
+#else
+#define __USE_RTC()	0
+#endif
+
+#ifdef CONFIG_PPC64
+
+/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
+#define get_tbl		get_tb
+
+#else
+
+static inline unsigned long get_tbl(void)
+{
+#if defined(CONFIG_403GCX)
+	unsigned long tbl;
+	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+	return tbl;
+#else
+	return mftbl();
+#endif
+}
+
+static inline unsigned int get_tbu(void)
+{
+#ifdef CONFIG_403GCX
+	unsigned int tbu;
+	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+	return tbu;
+#else
+	return mftbu();
+#endif
+}
+#endif /* !CONFIG_PPC64 */
+
+static inline unsigned int get_rtcl(void)
+{
+	unsigned int rtcl;
+
+	asm volatile("mfrtcl %0" : "=r" (rtcl));
+	return rtcl;
+}
+
+static inline u64 get_rtc(void)
+{
+	unsigned int hi, lo, hi2;
+
+	do {
+		asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
+			     : "=r" (hi), "=r" (lo), "=r" (hi2));
+	} while (hi2 != hi);
+	return (u64)hi * 1000000000 + lo;
+}
+
+static inline u64 get_vtb(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return mfspr(SPRN_VTB);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_PPC64
+static inline u64 get_tb(void)
+{
+	return mftb();
+}
+#else /* CONFIG_PPC64 */
+static inline u64 get_tb(void)
+{
+	unsigned int tbhi, tblo, tbhi2;
+
+	do {
+		tbhi = get_tbu();
+		tblo = get_tbl();
+		tbhi2 = get_tbu();
+	} while (tbhi != tbhi2);
+
+	return ((u64)tbhi << 32) | tblo;
+}
+#endif /* !CONFIG_PPC64 */
+
+static inline u64 get_tb_or_rtc(void)
+{
+	return __USE_RTC() ? get_rtc() : get_tb();
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+	mtspr(SPRN_TBWL, 0);
+	mtspr(SPRN_TBWU, upper);
+	mtspr(SPRN_TBWL, lower);
+}
+
+/* Accessor functions for the decrementer register.
+ * The 4xx doesn't even have a decrementer.  I tried to use the
+ * generic timer interrupt code, which seems OK, with the 4xx PIT
+ * in auto-reload mode.  The problem is PIT stops counting when it
+ * hits zero.  If it would wrap, we could use it just like a decrementer.
+ */
+static inline u64 get_dec(void)
+{
+#if defined(CONFIG_40x)
+	return (mfspr(SPRN_PIT));
+#else
+	return (mfspr(SPRN_DEC));
+#endif
+}
+
+/*
+ * Note: Book E and 4xx processors differ from other PowerPC processors
+ * in when the decrementer generates its interrupt: on the 1 to 0
+ * transition for Book E/4xx, but on the 0 to -1 transition for others.
+ */
+static inline void set_dec(u64 val)
+{
+#if defined(CONFIG_40x)
+	mtspr(SPRN_PIT, (u32) val);
+#else
+#ifndef CONFIG_BOOKE
+	--val;
+#endif
+	mtspr(SPRN_DEC, val);
+#endif /* not 40x */
+}
+
+static inline unsigned long tb_ticks_since(unsigned long tstamp)
+{
+	if (__USE_RTC()) {
+		int delta = get_rtcl() - (unsigned int) tstamp;
+		return delta < 0 ? delta + 1000000000 : delta;
+	}
+	return get_tbl() - tstamp;
+}
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+#ifdef CONFIG_PPC64
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#else
+extern u64 mulhdu(u64, u64);
+#endif
+
+extern void div128_by_32(u64 dividend_high, u64 dividend_low,
+			 unsigned divisor, struct div_result *dr);
+
+extern void secondary_cpu_time_init(void);
+extern void __init time_init(void);
+
+DECLARE_PER_CPU(u64, decrementers_next_tb);
+
+/* Convert timebase ticks to nanoseconds */
+unsigned long long tb_to_ns(unsigned long long tb_ticks);
+
+#endif /* __KERNEL__ */
+#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
new file mode 100644
index 0000000..926b9f9
--- /dev/null
+++ b/arch/powerpc/include/asm/timex.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TIMEX_H
+#define _ASM_POWERPC_TIMEX_H
+
+#ifdef __KERNEL__
+
+/*
+ * PowerPC architecture timex specifications
+ */
+
+#include <asm/cputable.h>
+#include <asm/reg.h>
+
+#define CLOCK_TICK_RATE	1024000 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+#ifdef __powerpc64__
+	return mftb();
+#else
+	cycles_t ret;
+
+	/*
+	 * For the "cycle" counter we use the timebase lower half.
+	 * Currently only used on SMP.
+	 */
+
+	ret = 0;
+
+	__asm__ __volatile__(
+#ifdef CONFIG_PPC_8xx
+		"97:	mftb %0\n"
+#else
+		"97:	mfspr %0, %2\n"
+#endif
+		"99:\n"
+		".section __ftr_fixup,\"a\"\n"
+		".align 2\n"
+		"98:\n"
+		"	.long %1\n"
+		"	.long 0\n"
+		"	.long 97b-98b\n"
+		"	.long 99b-98b\n"
+		"	.long 0\n"
+		"	.long 0\n"
+		".previous"
+		: "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
+	return ret;
+#endif
+}
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_TIMEX_H */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
new file mode 100644
index 0000000..f0e571b
--- /dev/null
+++ b/arch/powerpc/include/asm/tlb.h
@@ -0,0 +1,113 @@
+/*
+ *	TLB shootdown specifics for powerpc
+ *
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
+#ifdef __KERNEL__
+
+#ifndef __powerpc64__
+#include <asm/pgtable.h>
+#endif
+#include <asm/pgalloc.h>
+#ifndef __powerpc64__
+#include <asm/page.h>
+#include <asm/mmu.h>
+#endif
+
+#include <linux/pagemap.h>
+
+#define tlb_start_vma(tlb, vma)	do { } while (0)
+#define tlb_end_vma(tlb, vma)	do { } while (0)
+#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+
+extern void tlb_flush(struct mmu_gather *tlb);
+
+/* Get the generic bits... */
+#include <asm-generic/tlb.h>
+
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+					  unsigned long address)
+{
+#ifdef CONFIG_PPC_STD_MMU_32
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(tlb->mm, ptep, address);
+#endif
+}
+
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+						     unsigned int page_size)
+{
+	if (!tlb->page_size)
+		tlb->page_size = page_size;
+	else if (tlb->page_size != page_size) {
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
+		/*
+		 * update the page size after flush for the new
+		 * mmu_gather.
+		 */
+		tlb->page_size = page_size;
+	}
+}
+
+#ifdef CONFIG_SMP
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return cpumask_subset(mm_cpumask(mm),
+			      topology_sibling_cpumask(smp_processor_id()));
+}
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+	if (atomic_read(&mm->context.active_cpus) > 1)
+		return false;
+	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+static inline void mm_reset_thread_local(struct mm_struct *mm)
+{
+	WARN_ON(atomic_read(&mm->context.copros) > 0);
+	/*
+	 * It's possible for mm_access to take a reference on mm_users to
+	 * access the remote mm from another thread, but it's not allowed
+	 * to set mm_cpumask, so mm_users may be > 1 here.
+	 */
+	WARN_ON(current->mm != mm);
+	atomic_set(&mm->context.active_cpus, 1);
+	cpumask_clear(mm_cpumask(mm));
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+#else /* CONFIG_PPC_BOOK3S_64 */
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+	return cpumask_equal(mm_cpumask(mm),
+			      cpumask_of(smp_processor_id()));
+}
+#endif /* !CONFIG_PPC_BOOK3S_64 */
+
+#else /* CONFIG_SMP */
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return 1;
+}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+	return 1;
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_TLB_H */
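
The locality tests exist so flush paths can avoid a broadcast invalidation when only the current CPU can hold translations. A sketch with hypothetical flush helpers standing in for the real primitives:

/* Sketch: pick the cheapest invalidation (helper names are illustrative). */
static void example_flush_mm(struct mm_struct *mm)
{
	preempt_disable();
	if (mm_is_thread_local(mm))
		local_invalidate_mm(mm);	/* hypothetical local flush */
	else
		broadcast_invalidate_mm(mm);	/* hypothetical global flush */
	preempt_enable();
}
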
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
new file mode 100644
index 0000000..61fba43
--- /dev/null
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/tlbflush.h>
+#else
+#include <asm/nohash/tlbflush.h>
+#endif /* !CONFIG_PPC_BOOK3S */
+
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
new file mode 100644
index 0000000..e94f6db
--- /dev/null
+++ b/arch/powerpc/include/asm/tm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#include <uapi/asm/tm.h>
+
+#ifndef __ASSEMBLY__
+
+extern void tm_reclaim(struct thread_struct *thread,
+		       uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
+extern void tm_recheckpoint(struct thread_struct *thread);
+extern void tm_save_sprs(struct thread_struct *thread);
+extern void tm_restore_sprs(struct thread_struct *thread);
+
+extern bool tm_suspend_disabled;
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
new file mode 100644
index 0000000..a4a718d
--- /dev/null
+++ b/arch/powerpc/include/asm/topology.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TOPOLOGY_H
+#define _ASM_POWERPC_TOPOLOGY_H
+#ifdef __KERNEL__
+
+struct device;
+struct device_node;
+
+#ifdef CONFIG_NUMA
+
+/*
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
+ */
+#define RECLAIM_DISTANCE 10
+
+#include <asm/mmzone.h>
+
+#define cpumask_of_node(node) ((node) == -1 ?				\
+			       cpu_all_mask :				\
+			       node_to_cpumask_map[node])
+
+struct pci_bus;
+#ifdef CONFIG_PCI
+extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+	return -1;
+}
+#endif
+
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
+extern int __node_distance(int, int);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern void __init dump_numa_cpu_topology(void);
+
+extern int sysfs_add_device_to_node(struct device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+extern int numa_update_cpu_topology(bool cpus_locked);
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+	numa_cpu_lookup_table[cpu] = node;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
+#else
+
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
+static inline void dump_numa_cpu_topology(void) {}
+
+static inline int sysfs_add_device_to_node(struct device *dev, int nid)
+{
+	return 0;
+}
+
+static inline void sysfs_remove_device_from_node(struct device *dev,
+						int nid)
+{
+}
+
+static inline int numa_update_cpu_topology(bool cpus_locked)
+{
+	return 0;
+}
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
+#endif /* CONFIG_NUMA */
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
+extern int timed_topology_update(int nsecs);
+extern void __init shared_proc_topology_init(void);
+#else
+static inline int start_topology_update(void)
+{
+	return 0;
+}
+static inline int stop_topology_update(void)
+{
+	return 0;
+}
+static inline int prrn_is_enabled(void)
+{
+	return 0;
+}
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	return 0;
+}
+static inline int timed_topology_update(int nsecs)
+{
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static inline void shared_proc_topology_init(void) {}
+#endif
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
+#include <asm-generic/topology.h>
+
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/smp.h>
+
+#define topology_physical_package_id(cpu)	(cpu_to_chip_id(cpu))
+#define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif	/* _ASM_POWERPC_TOPOLOGY_H */
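
A hedged sketch of placing work near a PCI device using the node helpers above; when the node is unknown the mask degrades to all CPUs:

/* Sketch: choose a CPU from the device's NUMA node. */
static int example_pick_cpu(struct pci_bus *bus)
{
	return cpumask_first(cpumask_of_pcibus(bus));
}
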
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
new file mode 100644
index 0000000..d018e86
--- /dev/null
+++ b/arch/powerpc/include/asm/trace.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM powerpc
+
+#if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWERPC_H
+
+#include <linux/tracepoint.h>
+
+struct pt_regs;
+
+DECLARE_EVENT_CLASS(ppc64_interrupt_class,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs = regs;
+	),
+
+	TP_printk("pt_regs=%p", __entry->regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, irq_entry,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, irq_exit,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_entry,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs)
+);
+
+#ifdef CONFIG_PPC_PSERIES
+extern int hcall_tracepoint_regfunc(void);
+extern void hcall_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN_COND(hcall_entry,
+
+	TP_PROTO(unsigned long opcode, unsigned long *args),
+
+	TP_ARGS(opcode, args),
+
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+	),
+
+	TP_printk("opcode=%lu", __entry->opcode),
+
+	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN_COND(hcall_exit,
+
+	TP_PROTO(unsigned long opcode, long retval, unsigned long *retbuf),
+
+	TP_ARGS(opcode, retval, retbuf),
+
+	TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+		__field(long, retval)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->retval = retval;
+	),
+
+	TP_printk("opcode=%lu retval=%ld", __entry->opcode, __entry->retval),
+
+	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+extern int opal_tracepoint_regfunc(void);
+extern void opal_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN(opal_entry,
+
+	TP_PROTO(unsigned long opcode, unsigned long *args),
+
+	TP_ARGS(opcode, args),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+	),
+
+	TP_printk("opcode=%lu", __entry->opcode),
+
+	opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN(opal_exit,
+
+	TP_PROTO(unsigned long opcode, unsigned long retval),
+
+	TP_ARGS(opcode, retval),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+		__field(unsigned long, retval)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->retval = retval;
+	),
+
+	TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval),
+
+	opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+#endif
+
+TRACE_EVENT(hash_fault,
+
+	    TP_PROTO(unsigned long addr, unsigned long access, unsigned long trap),
+	    TP_ARGS(addr, access, trap),
+	    TP_STRUCT__entry(
+		    __field(unsigned long, addr)
+		    __field(unsigned long, access)
+		    __field(unsigned long, trap)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->addr = addr;
+		    __entry->access = access;
+		    __entry->trap = trap;
+		    ),
+
+	    TP_printk("hash fault with addr 0x%lx and access = 0x%lx trap = 0x%lx",
+		      __entry->addr, __entry->access, __entry->trap)
+);
+
+TRACE_EVENT(tlbie,
+
+	TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
+		unsigned long rs, unsigned long ric, unsigned long prs,
+		unsigned long r),
+	TP_ARGS(lpid, local, rb, rs, ric, prs, r),
+	TP_STRUCT__entry(
+		__field(unsigned long, lpid)
+		__field(unsigned long, local)
+		__field(unsigned long, rb)
+		__field(unsigned long, rs)
+		__field(unsigned long, ric)
+		__field(unsigned long, prs)
+		__field(unsigned long, r)
+		),
+
+	TP_fast_assign(
+		__entry->lpid = lpid;
+		__entry->local = local;
+		__entry->rb = rb;
+		__entry->rs = rs;
+		__entry->ric = ric;
+		__entry->prs = prs;
+		__entry->r = r;
+		),
+
+	TP_printk("lpid=%ld, local=%ld, rb=0x%lx, rs=0x%lx, ric=0x%lx, "
+		"prs=0x%lx, r=0x%lx", __entry->lpid, __entry->local,
+		__entry->rb, __entry->rs, __entry->ric, __entry->prs,
+		__entry->r)
+);
+
+#endif /* _TRACE_POWERPC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
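
Each TRACE_EVENT(name) above also generates a trace_name() call for the instrumentation site, compiled to a patched-out no-op unless the tracepoint is enabled; a sketch:

/* Sketch: firing the hash_fault tracepoint from a fault path. */
static void example_instrument(unsigned long addr, unsigned long access,
			       unsigned long trap)
{
	trace_hash_fault(addr, access, trap);
}
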
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 0000000..cf1ee75
--- /dev/null
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif  /* _ASM_PPC_TRACE_CLOCK_H */
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
new file mode 100644
index 0000000..c2a955b
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -0,0 +1,117 @@
+/*
+ * Common routines and memory layout for the Tundra TSI108 (Grendel)
+ * host bridge/memory controller.
+ *
+ * Author: Jacob Pan (jacob.pan@freescale.com)
+ *	   Alex Bounine (alexandreb@tundra.com)
+ *
+ * Copyright 2004-2006 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __PPC_KERNEL_TSI108_H
+#define __PPC_KERNEL_TSI108_H
+
+#include <asm/pci-bridge.h>
+
+/* Size of entire register space */
+#define TSI108_REG_SIZE		(0x10000)
+
+/* Sizes of register spaces for individual blocks */
+#define TSI108_HLP_SIZE		0x1000
+#define TSI108_PCI_SIZE		0x1000
+#define TSI108_CLK_SIZE		0x1000
+#define TSI108_PB_SIZE		0x1000
+#define TSI108_SD_SIZE		0x1000
+#define TSI108_DMA_SIZE		0x1000
+#define TSI108_ETH_SIZE		0x1000
+#define TSI108_I2C_SIZE		0x400
+#define TSI108_MPIC_SIZE	0x400
+#define TSI108_UART0_SIZE	0x200
+#define TSI108_GPIO_SIZE	0x200
+#define TSI108_UART1_SIZE	0x200
+
+/* Offsets within Tsi108(A) CSR space for individual blocks */
+#define TSI108_HLP_OFFSET	0x0000
+#define TSI108_PCI_OFFSET	0x1000
+#define TSI108_CLK_OFFSET	0x2000
+#define TSI108_PB_OFFSET	0x3000
+#define TSI108_SD_OFFSET	0x4000
+#define TSI108_DMA_OFFSET	0x5000
+#define TSI108_ETH_OFFSET	0x6000
+#define TSI108_I2C_OFFSET	0x7000
+#define TSI108_MPIC_OFFSET	0x7400
+#define TSI108_UART0_OFFSET	0x7800
+#define TSI108_GPIO_OFFSET	0x7A00
+#define TSI108_UART1_OFFSET	0x7C00
+
+/* Tsi108 registers used by common code components */
+#define TSI108_PCI_CSR		(0x004)
+#define TSI108_PCI_IRP_CFG_CTL	(0x180)
+#define TSI108_PCI_IRP_STAT	(0x184)
+#define TSI108_PCI_IRP_ENABLE	(0x188)
+#define TSI108_PCI_IRP_INTAD	(0x18C)
+
+#define TSI108_PCI_IRP_STAT_P_INT	(0x00400000)
+#define TSI108_PCI_IRP_ENABLE_P_INT	(0x00400000)
+
+#define TSI108_CG_PWRUP_STATUS	(0x234)
+
+#define TSI108_PB_ISR		(0x00C)
+#define TSI108_PB_ERRCS		(0x404)
+#define TSI108_PB_AERR		(0x408)
+
+#define TSI108_PB_ERRCS_ES		(1 << 1)
+#define TSI108_PB_ISR_PBS_RD_ERR	(1 << 8)
+
+#define TSI108_PCI_CFG_SIZE		(0x01000000)
+
+/*
+ * PHY Configuration Options
+ *
+ * Specify "bcm54xx" in the compatible property of your device tree phy
+ * nodes if your board uses the Broadcom PHYs
+ */
+#define TSI108_PHY_MV88E	0	/* Marvell 88Exxxx PHY */
+#define TSI108_PHY_BCM54XX	1	/* Broadcom BCM54xx PHY */
+
+/* Global variables */
+
+extern u32 tsi108_pci_cfg_base;
+/* Exported functions */
+
+extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn,
+				      int offset, int len, u32 val);
+extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn,
+				     int offset, int len, u32 *val);
+extern void tsi108_clear_pci_error(u32 pci_cfg_base);
+
+extern phys_addr_t get_csrbase(void);
+
+typedef struct {
+	u32 regs;		/* hw registers base address */
+	u32 phyregs;		/* phy registers base address */
+	u16 phy;		/* phy address */
+	u16 irq_num;		/* irq number */
+	u8 mac_addr[6];		/* phy mac address */
+	u16 phy_type;	/* type of phy on board */
+} hw_info;
+
+extern u32 get_vir_csrbase(void);
+extern u32 tsi108_csr_vir_base;
+
+static inline u32 tsi108_read_reg(u32 reg_offset)
+{
+	return in_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset));
+}
+
+static inline void tsi108_write_reg(u32 reg_offset, u32 val)
+{
+	out_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset), val);
+}
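+
+/*
+ * Illustrative sketch (not part of the original header): assuming
+ * tsi108_csr_vir_base has been set up by platform code, and assuming the
+ * IRP status bits are write-one-to-clear (check the Tsi108 manual), a
+ * pending PCI interrupt could be read and acknowledged as:
+ *
+ *	u32 stat = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);
+ *	if (stat & TSI108_PCI_IRP_STAT_P_INT)
+ *		tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT,
+ *				 TSI108_PCI_IRP_STAT_P_INT);
+ */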
+
+#endif				/* __PPC_KERNEL_TSI108_H */
diff --git a/arch/powerpc/include/asm/tsi108_irq.h b/arch/powerpc/include/asm/tsi108_irq.h
new file mode 100644
index 0000000..6ed9397
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_irq.h
@@ -0,0 +1,124 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Alex Bounine, <alexandreb at tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * definitions for interrupt controller initialization and external interrupt
+ * demultiplexing on TSI108EMU/SVB boards.
+ */
+
+#ifndef _ASM_POWERPC_TSI108_IRQ_H
+#define _ASM_POWERPC_TSI108_IRQ_H
+
+/*
+ * Tsi108 interrupts
+ */
+#ifndef TSI108_IRQ_REG_BASE
+#define TSI108_IRQ_REG_BASE		0
+#endif
+
+#define TSI108_IRQ(x)		(TSI108_IRQ_REG_BASE + (x))
+
+#define TSI108_MAX_VECTORS	(36 + 4)	/* 36 sources + PCI INT demux */
+#define MAX_TASK_PRIO	0xF
+
+#define TSI108_IRQ_SPURIOUS	(TSI108_MAX_VECTORS)
+
+#define DEFAULT_PRIO_LVL	10	/* initial priority level */
+
+/* Interrupt vector assignments to external and internal
+ * request sources. */
+
+/* EXTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_EXT_INT0	TSI108_IRQ(0)	/* External Source at INT[0] */
+#define IRQ_TSI108_EXT_INT1	TSI108_IRQ(1)	/* External Source at INT[1] */
+#define IRQ_TSI108_EXT_INT2	TSI108_IRQ(2)	/* External Source at INT[2] */
+#define IRQ_TSI108_EXT_INT3	TSI108_IRQ(3)	/* External Source at INT[3] */
+
+/* INTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_RESERVED0	TSI108_IRQ(4)	/* Reserved IRQ */
+#define IRQ_TSI108_RESERVED1	TSI108_IRQ(5)	/* Reserved IRQ */
+#define IRQ_TSI108_RESERVED2	TSI108_IRQ(6)	/* Reserved IRQ */
+#define IRQ_TSI108_RESERVED3	TSI108_IRQ(7)	/* Reserved IRQ */
+#define IRQ_TSI108_DMA0		TSI108_IRQ(8)	/* DMA0 */
+#define IRQ_TSI108_DMA1		TSI108_IRQ(9)	/* DMA1 */
+#define IRQ_TSI108_DMA2		TSI108_IRQ(10)	/* DMA2 */
+#define IRQ_TSI108_DMA3		TSI108_IRQ(11)	/* DMA3 */
+#define IRQ_TSI108_UART0	TSI108_IRQ(12)	/* UART0 */
+#define IRQ_TSI108_UART1	TSI108_IRQ(13)	/* UART1 */
+#define IRQ_TSI108_I2C		TSI108_IRQ(14)	/* I2C */
+#define IRQ_TSI108_GPIO		TSI108_IRQ(15)	/* GPIO */
+#define IRQ_TSI108_GIGE0	TSI108_IRQ(16)	/* GIGE0 */
+#define IRQ_TSI108_GIGE1	TSI108_IRQ(17)	/* GIGE1 */
+#define IRQ_TSI108_RESERVED4	TSI108_IRQ(18)	/* Reserved IRQ */
+#define IRQ_TSI108_HLP		TSI108_IRQ(19)	/* HLP */
+#define IRQ_TSI108_SDRAM	TSI108_IRQ(20)	/* SDC */
+#define IRQ_TSI108_PROC_IF	TSI108_IRQ(21)	/* Processor IF */
+#define IRQ_TSI108_RESERVED5	TSI108_IRQ(22)	/* Reserved IRQ */
+#define IRQ_TSI108_PCI		TSI108_IRQ(23)	/* PCI/X block */
+
+#define IRQ_TSI108_MBOX0	TSI108_IRQ(24)	/* Mailbox 0 register */
+#define IRQ_TSI108_MBOX1	TSI108_IRQ(25)	/* Mailbox 1 register */
+#define IRQ_TSI108_MBOX2	TSI108_IRQ(26)	/* Mailbox 2 register */
+#define IRQ_TSI108_MBOX3	TSI108_IRQ(27)	/* Mailbox 3 register */
+
+#define IRQ_TSI108_DBELL0	TSI108_IRQ(28)	/* Doorbell 0 */
+#define IRQ_TSI108_DBELL1	TSI108_IRQ(29)	/* Doorbell 1 */
+#define IRQ_TSI108_DBELL2	TSI108_IRQ(30)	/* Doorbell 2 */
+#define IRQ_TSI108_DBELL3	TSI108_IRQ(31)	/* Doorbell 3 */
+
+#define IRQ_TSI108_TIMER0	TSI108_IRQ(32)	/* Global Timer 0 */
+#define IRQ_TSI108_TIMER1	TSI108_IRQ(33)	/* Global Timer 1 */
+#define IRQ_TSI108_TIMER2	TSI108_IRQ(34)	/* Global Timer 2 */
+#define IRQ_TSI108_TIMER3	TSI108_IRQ(35)	/* Global Timer 3 */
+
+/*
+ * PCI bus INTA# - INTD# lines demultiplexer
+ */
+#define IRQ_PCI_INTAD_BASE	TSI108_IRQ(36)
+#define IRQ_PCI_INTA		(IRQ_PCI_INTAD_BASE + 0)
+#define IRQ_PCI_INTB		(IRQ_PCI_INTAD_BASE + 1)
+#define IRQ_PCI_INTC		(IRQ_PCI_INTAD_BASE + 2)
+#define IRQ_PCI_INTD		(IRQ_PCI_INTAD_BASE + 3)
+#define NUM_PCI_IRQS		(4)
+
+/* number of entries in vector dispatch table */
+#define IRQ_TSI108_TAB_SIZE	(TSI108_MAX_VECTORS + 1)
+
+/* Mapping of MPIC outputs to processors' interrupt pins */
+
+#define IDIR_INT_OUT0		0x1
+#define IDIR_INT_OUT1		0x2
+#define IDIR_INT_OUT2		0x4
+#define IDIR_INT_OUT3		0x8
+
+/*---------------------------------------------------------------
+ * IRQ line configuration parameters */
+
+/* Interrupt delivery modes */
+typedef enum {
+	TSI108_IRQ_DIRECTED,
+	TSI108_IRQ_DISTRIBUTED,
+} TSI108_IRQ_MODE;
+#endif				/*  _ASM_POWERPC_TSI108_IRQ_H */
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h
new file mode 100644
index 0000000..ae59d5b
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_pci.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2007 IBM Corp
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_TSI108_PCI_H
+#define _ASM_POWERPC_TSI108_PCI_H
+
+#include <asm/tsi108.h>
+
+/* Register definitions */
+#define TSI108_PCI_P2O_BAR0 (TSI108_PCI_OFFSET + 0x10)
+#define TSI108_PCI_P2O_BAR0_UPPER (TSI108_PCI_OFFSET + 0x14)
+#define TSI108_PCI_P2O_BAR2 (TSI108_PCI_OFFSET + 0x18)
+#define TSI108_PCI_P2O_BAR2_UPPER (TSI108_PCI_OFFSET + 0x1c)
+#define TSI108_PCI_P2O_PAGE_SIZES (TSI108_PCI_OFFSET + 0x4c)
+#define TSI108_PCI_PFAB_BAR0 (TSI108_PCI_OFFSET + 0x204)
+#define TSI108_PCI_PFAB_BAR0_UPPER (TSI108_PCI_OFFSET + 0x208)
+#define TSI108_PCI_PFAB_IO (TSI108_PCI_OFFSET + 0x20c)
+#define TSI108_PCI_PFAB_IO_UPPER (TSI108_PCI_OFFSET + 0x210)
+#define TSI108_PCI_PFAB_MEM32 (TSI108_PCI_OFFSET + 0x214)
+#define TSI108_PCI_PFAB_PFM3 (TSI108_PCI_OFFSET + 0x220)
+#define TSI108_PCI_PFAB_PFM4 (TSI108_PCI_OFFSET + 0x230)
+
+extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
+extern void tsi108_pci_int_init(struct device_node *node);
+extern void tsi108_irq_cascade(struct irq_desc *desc);
+extern void tsi108_clear_pci_cfg_error(void);
+
+#endif				/*  _ASM_POWERPC_TSI108_PCI_H */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
new file mode 100644
index 0000000..49a0678
--- /dev/null
+++ b/arch/powerpc/include/asm/types.h
@@ -0,0 +1,38 @@
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue.  However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifdef __powerpc64__
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define PPC64_ELF_ABI_v2
+#else
+#define PPC64_ELF_ABI_v1
+#endif
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+typedef __vector128 vector128;
+
+typedef struct {
+	unsigned long entry;
+	unsigned long toc;
+	unsigned long env;
+} func_descr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
new file mode 100644
index 0000000..bac225b
--- /dev/null
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current->thread.addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+	current->thread.addr_limit = fs;
+	/* On user-mode return check addr_limit (fs) is correct */
+	set_thread_flag(TIF_FSCHECK);
+}
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+#define user_addr_max()	(get_fs().seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
+ */
+#define __access_ok(addr, size, segment)	\
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+static inline int __access_ok(unsigned long addr, unsigned long size,
+			mm_segment_t seg)
+{
+	if (addr > seg.seg)
+		return 0;
+	return (size == 0 || size - 1 <= seg.seg - addr);
+}
+
+#endif
+
+#define access_ok(type, addr, size)		\
+	(__chk_user_ptr(addr),			\
+	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_inatomic(x, ptr) \
+	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
+#define __put_user_inatomic(x, ptr) \
+	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
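+
+/*
+ * Illustrative sketch (not part of the original header): given a user
+ * pointer "int __user *uptr", a typical caller copies one value each way
+ * and treats a non-zero return as a fault:
+ *
+ *	int val;
+ *
+ *	if (get_user(val, uptr))
+ *		return -EFAULT;
+ *	val++;
+ *	if (put_user(val, uptr))
+ *		return -EFAULT;
+ */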
+
+extern long __put_user_bad(void);
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op)			\
+	__asm__ __volatile__(					\
+		"1:	" op " %1,0(%2)	# put_user\n"		\
+		"2:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"3:	li %0,%3\n"				\
+		"	b 2b\n"					\
+		".previous\n"					\
+		EX_TABLE(1b, 3b)				\
+		: "=r" (err)					\
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval)				\
+	  __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	stw %1,0(%2)\n"				\
+		"2:	stw %1+1,4(%2)\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	li %0,%3\n"				\
+		"	b 3b\n"					\
+		".previous\n"					\
+		EX_TABLE(1b, 4b)				\
+		EX_TABLE(2b, 4b)				\
+		: "=r" (err)					\
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
+	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
+	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
+	  case 8: __put_user_asm2(x, ptr, retval); break;	\
+	  default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	if (!is_kernel_addr((unsigned long)__pu_addr))		\
+		might_fault();					\
+	__chk_user_ptr(ptr);					\
+	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;						\
+})
+
+#define __put_user_check(x, ptr, size)					\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
+	might_fault();							\
+	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
+		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;							\
+})
+
+#define __put_user_nosleep(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__chk_user_ptr(ptr);					\
+	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;						\
+})
+
+
+extern long __get_user_bad(void);
+
+/*
+ * This does an atomic 128-byte aligned load from userspace.
+ * It is up to the caller to do enable_kernel_vmx() before calling!
+ */
+#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
+	__asm__ __volatile__(				\
+		"1:	lvx  0,0,%1	# get user\n"	\
+		" 	stvx 0,0,%2	# put kernel\n"	\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li %0,%3\n"			\
+		"	b 2b\n"				\
+		".previous\n"				\
+		EX_TABLE(1b, 3b)			\
+		: "=r" (err)			\
+		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_asm(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0(%2)	# get_user\n"	\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li %0,%3\n"			\
+		"	li %1,0\n"			\
+		"	b 2b\n"				\
+		".previous\n"				\
+		EX_TABLE(1b, 3b)			\
+		: "=r" (err), "=r" (x)			\
+		: "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err)			\
+	__get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err)			\
+	__asm__ __volatile__(				\
+		"1:	lwz %1,0(%2)\n"			\
+		"2:	lwz %1+1,4(%2)\n"		\
+		"3:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"4:	li %0,%3\n"			\
+		"	li %1,0\n"			\
+		"	li %1+1,0\n"			\
+		"	b 3b\n"				\
+		".previous\n"				\
+		EX_TABLE(1b, 4b)			\
+		EX_TABLE(2b, 4b)			\
+		: "=r" (err), "=&r" (x)			\
+		: "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size(x, ptr, size, retval)			\
+do {								\
+	retval = 0;						\
+	__chk_user_ptr(ptr);					\
+	if (size > sizeof(x))					\
+		(x) = __get_user_bad();				\
+	switch (size) {						\
+	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
+	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
+	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
+	case 8: __get_user_asm2(x, ptr, retval);  break;	\
+	default: (x) = __get_user_bad();			\
+	}							\
+} while (0)
+
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+#define __get_user_nocheck(x, ptr, size)			\
+({								\
+	long __gu_err;						\
+	__long_type(*(ptr)) __gu_val;				\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+	__chk_user_ptr(ptr);					\
+	if (!is_kernel_addr((unsigned long)__gu_addr))		\
+		might_fault();					\
+	barrier_nospec();					\
+	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+#define __get_user_check(x, ptr, size)					\
+({									\
+	long __gu_err = -EFAULT;					\
+	__long_type(*(ptr)) __gu_val = 0;				\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	might_fault();							\
+	if (access_ok(VERIFY_READ, __gu_addr, (size))) {		\
+		barrier_nospec();					\
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	}								\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
+	__gu_err;							\
+})
+
+#define __get_user_nosleep(x, ptr, size)			\
+({								\
+	long __gu_err;						\
+	__long_type(*(ptr)) __gu_val;				\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+	__chk_user_ptr(ptr);					\
+	barrier_nospec();					\
+	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+#ifdef __powerpc64__
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	return __copy_tofrom_user(to, from, n);
+}
+#endif /* __powerpc64__ */
+
+static inline unsigned long raw_copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;
+
+		switch (n) {
+		case 1:
+			barrier_nospec();
+			__get_user_size(*(u8 *)to, from, 1, ret);
+			break;
+		case 2:
+			barrier_nospec();
+			__get_user_size(*(u16 *)to, from, 2, ret);
+			break;
+		case 4:
+			barrier_nospec();
+			__get_user_size(*(u32 *)to, from, 4, ret);
+			break;
+		case 8:
+			barrier_nospec();
+			__get_user_size(*(u64 *)to, from, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+
+	barrier_nospec();
+	return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long raw_copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;
+
+		switch (n) {
+		case 1:
+			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+			break;
+		case 2:
+			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+			break;
+		case 4:
+			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+			break;
+		case 8:
+			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+	might_fault();
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		return __clear_user(addr, size);
+	return size;
+}
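+
+/*
+ * Illustrative use (not part of the original header): zeroing a
+ * user-supplied buffer, where any non-zero return (bytes left uncleared)
+ * is reported as a fault:
+ *
+ *	if (clear_user(ubuf, len))
+ *		return -EFAULT;
+ */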
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+extern long __copy_from_user_flushcache(void *dst, const void __user *src,
+		unsigned size);
+extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+			   size_t len);
+
+#endif	/* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
new file mode 100644
index 0000000..78f2675
--- /dev/null
+++ b/arch/powerpc/include/asm/udbg.h
@@ -0,0 +1,62 @@
+/*
+ * (c) 2001, 2006 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_UDBG_H
+#define _ASM_POWERPC_UDBG_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/init.h>
+
+extern void (*udbg_putc)(char c);
+extern void (*udbg_flush)(void);
+extern int (*udbg_getc)(void);
+extern int (*udbg_getc_poll)(void);
+
+extern void udbg_puts(const char *s);
+extern int udbg_write(const char *s, int n);
+
+extern void register_early_udbg_console(void);
+extern void udbg_printf(const char *fmt, ...)
+	__attribute__ ((format (printf, 1, 2)));
+extern void udbg_progress(char *s, unsigned short hex);
+
+extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(unsigned int clock);
+
+struct device_node;
+extern void udbg_scc_init(int force_scc);
+extern int udbg_adb_init(int force_btext);
+extern void udbg_adb_init_early(void);
+
+extern void __init udbg_early_init(void);
+extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_debug_lpar_hvsi(void);
+extern void __init udbg_init_pmac_realmode(void);
+extern void __init udbg_init_maple_realmode(void);
+extern void __init udbg_init_pas_realmode(void);
+extern void __init udbg_init_rtas_panel(void);
+extern void __init udbg_init_rtas_console(void);
+extern void __init udbg_init_debug_beat(void);
+extern void __init udbg_init_btext(void);
+extern void __init udbg_init_44x_as1(void);
+extern void __init udbg_init_40x_realmode(void);
+extern void __init udbg_init_cpm(void);
+extern void __init udbg_init_usbgecko(void);
+extern void __init udbg_init_memcons(void);
+extern void __init udbg_init_ehv_bc(void);
+extern void __init udbg_init_ps3gelic(void);
+extern void __init udbg_init_debug_opal_raw(void);
+extern void __init udbg_init_debug_opal_hvsi(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/uic.h b/arch/powerpc/include/asm/uic.h
new file mode 100644
index 0000000..597edfc
--- /dev/null
+++ b/arch/powerpc/include/asm/uic.h
@@ -0,0 +1,21 @@
+/*
+ * IBM PPC4xx UIC external definitions and structure.
+ *
+ * Maintainer: David Gibson <dwg@au1.ibm.com>
+ * Copyright 2007 IBM Corporation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_UIC_H
+#define _ASM_POWERPC_UIC_H
+
+#ifdef __KERNEL__
+
+extern void __init uic_init_tree(void);
+extern unsigned int uic_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UIC_H */
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
new file mode 100644
index 0000000..ce69c5e
--- /dev/null
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_UNALIGNED_H
+#define _ASM_POWERPC_UNALIGNED_H
+
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
+ */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+#else
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
+#endif
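+
+/*
+ * Illustrative use (not part of the original header): reading a 32-bit
+ * value from an arbitrary byte offset in a buffer "const u8 *buf":
+ *
+ *	u32 v = get_unaligned((u32 *)(buf + 3));
+ */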
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
new file mode 100644
index 0000000..e278299
--- /dev/null
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * uninorth.h: definitions for using the "UniNorth" host bridge chip
+ *             from Apple. This chip is used on "Core99" machines.
+ *             This also includes the U2 used on more recent MacRISC2/3
+ *             machines and the U3 (G5).
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_UNINORTH_H__
+#define __ASM_UNINORTH_H__
+
+/*
+ * Uni-N and U3 config space reg. definitions
+ *
+ * (Little endian)
+ */
+
+/* Address ranges selection. This one should work with Bandit too */
+/* Not U3 */
+#define UNI_N_ADDR_SELECT		0x48
+#define UNI_N_ADDR_COARSE_MASK		0xffff0000	/* 256Mb regions at *0000000 */
+#define UNI_N_ADDR_FINE_MASK		0x0000ffff	/*  16Mb regions at f*000000 */
+
+/* AGP registers */
+/* Not U3 */
+#define UNI_N_CFG_GART_BASE		0x8c
+#define UNI_N_CFG_AGP_BASE		0x90
+#define UNI_N_CFG_GART_CTRL		0x94
+#define UNI_N_CFG_INTERNAL_STATUS	0x98
+#define UNI_N_CFG_GART_DUMMY_PAGE	0xa4
+
+/* UNI_N_CFG_GART_CTRL bits definitions */
+#define UNI_N_CFG_GART_INVAL		0x00000001
+#define UNI_N_CFG_GART_ENABLE		0x00000100
+#define UNI_N_CFG_GART_2xRESET		0x00010000
+#define UNI_N_CFG_GART_DISSBADET	0x00020000
+/* The following seem to be used only on U3 <j.glisse@gmail.com> */
+#define U3_N_CFG_GART_SYNCMODE		0x00040000
+#define U3_N_CFG_GART_PERFRD		0x00080000
+#define U3_N_CFG_GART_B2BGNT		0x00200000
+#define U3_N_CFG_GART_FASTDDR		0x00400000
+
+/* My understanding of UniNorth AGP as of UniNorth rev 1.0x;
+ * revision 1.5 (x4 AGP) may need further changes.
+ *
+ * AGP_BASE register contains the base address of the AGP aperture on
+ * the AGP bus. It doesn't seem to be visible to the CPU as of UniNorth 1.x,
+ * even if decoding of this address range is enabled in the address select
+ * register. Apparently, the only supported bases are 256Mb multiples
+ * (high 4 bits of that register).
+ *
+ * The GART_BASE register appears to contain the physical address of the
+ * GART in system memory in the high address bits (page aligned), and the
+ * GART size in the low order bits (number of GART pages).
+ *
+ * The GART format itself is one 32-bit word per physical memory page.
+ * This word contains, in little-endian format (!!!), the physical address
+ * of the page in the high bits, and what appears to be an "enable" bit
+ * in the least-significant bit that must be set to 1 when the entry is
+ * valid.
+ *
+ * Obviously, the GART is not cache coherent and so any change to it
+ * must be flushed to memory (or maybe just by making the GART space
+ * non-cacheable). AGP memory itself doesn't seem to be cache coherent either.
+ *
+ * In order to invalidate the GART (which is probably necessary to
+ * invalidate the bridge's internal TLBs), the following sequence has to
+ * be written, in order, to the GART_CTRL register:
+ *
+ *   UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ *   UNI_N_CFG_GART_ENABLE
+ *   UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET
+ *   UNI_N_CFG_GART_ENABLE
+ *
+ * As far as AGP "features" are concerned, it looks like fast write may
+ * not be supported but this has to be confirmed.
+ *
+ * Turning on AGP seems to require a double invalidate operation, one before
+ * setting the AGP command register, one after.
+ *
+ * Turning off AGP seems to require the following sequence: first wait
+ * for the AGP to be idle by reading the internal status register, then
+ * write in that order to the GART_CTRL register:
+ *
+ *   UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ *   0
+ *   UNI_N_CFG_GART_2xRESET
+ *   0
+ */
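+
+/*
+ * Illustrative sketch (not part of the original header): with a
+ * hypothetical uninorth_write() helper that writes a UniNorth config
+ * register, the invalidation sequence described above would be:
+ *
+ *	uninorth_write(UNI_N_CFG_GART_CTRL,
+ *		       UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL);
+ *	uninorth_write(UNI_N_CFG_GART_CTRL, UNI_N_CFG_GART_ENABLE);
+ *	uninorth_write(UNI_N_CFG_GART_CTRL,
+ *		       UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET);
+ *	uninorth_write(UNI_N_CFG_GART_CTRL, UNI_N_CFG_GART_ENABLE);
+ */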
+
+/*
+ * Uni-N memory mapped reg. definitions
+ *
+ * Those registers are Big-Endian !!
+ *
+ * Their meanings come from Darwin and/or from experiments I made with
+ * the boot ROM; I'm not sure about their exact meanings yet.
+ *
+ */
+
+/* Version of the UniNorth chip */
+#define UNI_N_VERSION			0x0000		/* Known versions: 3,7 and 8 */
+
+#define UNI_N_VERSION_107		0x0003		/* 1.0.7 */
+#define UNI_N_VERSION_10A		0x0007		/* 1.0.10 */
+#define UNI_N_VERSION_150		0x0011		/* 1.5 */
+#define UNI_N_VERSION_200		0x0024		/* 2.0 */
+#define UNI_N_VERSION_PANGEA		0x00C0		/* Integrated U1 + K */
+#define UNI_N_VERSION_INTREPID		0x00D2		/* Integrated U2 + K */
+#define UNI_N_VERSION_300		0x0030		/* 3.0 (U3 on G5) */
+
+/* This register is used to enable/disable various clocks */
+#define UNI_N_CLOCK_CNTL		0x0020
+#define UNI_N_CLOCK_CNTL_PCI		0x00000001	/* PCI2 clock control */
+#define UNI_N_CLOCK_CNTL_GMAC		0x00000002	/* GMAC clock control */
+#define UNI_N_CLOCK_CNTL_FW		0x00000004	/* FireWire clock control */
+#define UNI_N_CLOCK_CNTL_ATA100		0x00000010	/* ATA-100 clock control (U2) */
+
+/* Power Management control */
+#define UNI_N_POWER_MGT			0x0030
+#define UNI_N_POWER_MGT_NORMAL		0x00
+#define UNI_N_POWER_MGT_IDLE2		0x01
+#define UNI_N_POWER_MGT_SLEEP		0x02
+
+/* This register is configured by Darwin depending on the UniN
+ * revision
+ */
+#define UNI_N_ARB_CTRL			0x0040
+#define UNI_N_ARB_CTRL_QACK_DELAY_SHIFT	15
+#define UNI_N_ARB_CTRL_QACK_DELAY_MASK	0x0e1f8000
+#define UNI_N_ARB_CTRL_QACK_DELAY	0x30
+#define UNI_N_ARB_CTRL_QACK_DELAY105	0x00
+
+/* This one _might_ return the CPU number of the CPU reading it;
+ * the bootROM decides whether to boot or to sleep/spinloop depending
+ * on this register being 0 or not
+ */
+#define UNI_N_CPU_NUMBER		0x0050
+
+/* This register appears to be read by the bootROM to decide what
+ * to do on a non-recoverable reset (powerup or wakeup)
+ */
+#define UNI_N_HWINIT_STATE		0x0070
+#define UNI_N_HWINIT_STATE_SLEEPING	0x01
+#define UNI_N_HWINIT_STATE_RUNNING	0x02
+/* This last bit appears to be used by the bootROM to know that the second
+ * CPU has started and will enter its sleep loop with IP=0
+ */
+#define UNI_N_HWINIT_STATE_CPU1_FLAG	0x10000000
+
+/* This register controls AACK delay, which is set when 2004 iBook/PowerBook
+ * is in low speed mode.
+ */
+#define UNI_N_AACK_DELAY		0x0100
+#define UNI_N_AACK_DELAY_ENABLE		0x00000001
+
+/* Clock status for Intrepid */
+#define UNI_N_CLOCK_STOP_STATUS0	0x0150
+#define UNI_N_CLOCK_STOPPED_EXTAGP	0x00200000
+#define UNI_N_CLOCK_STOPPED_AGPDEL	0x00100000
+#define UNI_N_CLOCK_STOPPED_I2S0_45_49	0x00080000
+#define UNI_N_CLOCK_STOPPED_I2S0_18	0x00040000
+#define UNI_N_CLOCK_STOPPED_I2S1_45_49	0x00020000
+#define UNI_N_CLOCK_STOPPED_I2S1_18	0x00010000
+#define UNI_N_CLOCK_STOPPED_TIMER	0x00008000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK18	0x00004000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK32	0x00002000
+#define UNI_N_CLOCK_STOPPED_SCC_VIA32	0x00001000
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT0	0x00000800
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT1	0x00000400
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT2	0x00000200
+#define UNI_N_CLOCK_STOPPED_PCI_FBCLKO	0x00000100
+#define UNI_N_CLOCK_STOPPED_VEO0	0x00000080
+#define UNI_N_CLOCK_STOPPED_VEO1	0x00000040
+#define UNI_N_CLOCK_STOPPED_USB0	0x00000020
+#define UNI_N_CLOCK_STOPPED_USB1	0x00000010
+#define UNI_N_CLOCK_STOPPED_USB2	0x00000008
+#define UNI_N_CLOCK_STOPPED_32		0x00000004
+#define UNI_N_CLOCK_STOPPED_45		0x00000002
+#define UNI_N_CLOCK_STOPPED_49		0x00000001
+
+#define UNI_N_CLOCK_STOP_STATUS1	0x0160
+#define UNI_N_CLOCK_STOPPED_PLL4REF	0x00080000
+#define UNI_N_CLOCK_STOPPED_CPUDEL	0x00040000
+#define UNI_N_CLOCK_STOPPED_CPU		0x00020000
+#define UNI_N_CLOCK_STOPPED_BUF_REFCKO	0x00010000
+#define UNI_N_CLOCK_STOPPED_PCI2	0x00008000
+#define UNI_N_CLOCK_STOPPED_FW		0x00004000
+#define UNI_N_CLOCK_STOPPED_GB		0x00002000
+#define UNI_N_CLOCK_STOPPED_ATA66	0x00001000
+#define UNI_N_CLOCK_STOPPED_ATA100	0x00000800
+#define UNI_N_CLOCK_STOPPED_MAX		0x00000400
+#define UNI_N_CLOCK_STOPPED_PCI1	0x00000200
+#define UNI_N_CLOCK_STOPPED_KLPCI	0x00000100
+#define UNI_N_CLOCK_STOPPED_USB0PCI	0x00000080
+#define UNI_N_CLOCK_STOPPED_USB1PCI	0x00000040
+#define UNI_N_CLOCK_STOPPED_USB2PCI	0x00000020
+#define UNI_N_CLOCK_STOPPED_7PCI1	0x00000008
+#define UNI_N_CLOCK_STOPPED_AGP		0x00000004
+#define UNI_N_CLOCK_STOPPED_PCI0	0x00000002
+#define UNI_N_CLOCK_STOPPED_18		0x00000001
+
+/* Intrepid register used by OF's do-platform-clockspreading */
+#define UNI_N_CLOCK_SPREADING		0x190
+
+/* UniNorth rev. 1.5 has additional performance monitor registers at 0xf00-0xf50 */
+
+
+/*
+ * U3 specific registers
+ */
+
+
+/* U3 Toggle */
+#define U3_TOGGLE_REG			0x00e0
+#define U3_PMC_START_STOP		0x0001
+#define U3_MPIC_RESET			0x0002
+#define U3_MPIC_OUTPUT_ENABLE		0x0004
+
+/* U3 API PHY Config 1 */
+#define U3_API_PHY_CONFIG_1		0x23030
+
+/* U3 HyperTransport registers */
+#define U3_HT_CONFIG_BASE      		0x70000
+#define U3_HT_LINK_COMMAND		0x100
+#define U3_HT_LINK_CONFIG		0x110
+#define U3_HT_LINK_FREQ			0x120
+
+#endif /* __ASM_UNINORTH_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
new file mode 100644
index 0000000..c19379f
--- /dev/null
+++ b/arch/powerpc/include/asm/unistd.h
@@ -0,0 +1,58 @@
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_UNISTD_H_
+#define _ASM_POWERPC_UNISTD_H_
+
+#include <uapi/asm/unistd.h>
+
+
+#define NR_syscalls		389
+
+#define __NR__exit __NR_exit
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLD_UNAME
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#ifdef CONFIG_PPC32
+#define __ARCH_WANT_OLD_STAT
+#endif
+#ifdef CONFIG_PPC64
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#endif
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+
+#endif		/* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
new file mode 100644
index 0000000..7422a99
--- /dev/null
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include <asm/probes.h>
+
+typedef ppc_opcode_t uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES		4
+#define UPROBE_XOL_SLOT_BYTES	(MAX_UINSN_BYTES)
+
+/* The following alias is needed for reference from arch-agnostic code */
+#define UPROBE_SWBP_INSN	BREAKPOINT_INSTRUCTION
+#define UPROBE_SWBP_INSN_SIZE	4 /* swbp insn size in bytes */
+
+struct arch_uprobe {
+	union {
+		u32	insn;
+		u32	ixol;
+	};
+};
+
+struct arch_uprobe_task {
+	unsigned long	saved_trap_nr;
+};
+
+#endif	/* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
new file mode 100644
index 0000000..5c0e082
--- /dev/null
+++ b/arch/powerpc/include/asm/user.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_USER_H
+#define _ASM_POWERPC_USER_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Adapted from <asm-alpha/user.h>
+ *
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd, NOT the osf-core).  The file contents
+ * are as follows:
+ *
+ *  upage: 1 page consisting of a user struct that tells gdb
+ *	what is present in the file.  Directly after this is a
+ *	copy of the task_struct, which is currently not used by gdb,
+ *	but it may come in handy at some point.  All of the registers
+ *	are stored as part of the upage.  The upage should always be
+ *	only one page long.
+ *  data: The data segment follows next.  We use current->end_text to
+ *	current->brk to pick up all of the user variables, plus any memory
+ *	that may have been sbrk'ed.  No attempt is made to determine if a
+ *	page is demand-zero or if a page is totally unused, we just cover
+ *	the entire range.  All of the addresses are rounded in such a way
+ *	that an integral number of pages is written.
+ *  stack: We need the stack information in order to get a meaningful
+ *	backtrace.  We need to write the data from usp to
+ *	current->start_stack, so we round each of these in order to be able
+ *	to write an integer number of pages.
+ */
+struct user {
+	struct pt_regs	regs;			/* entire machine state */
+	size_t		u_tsize;		/* text size (pages) */
+	size_t		u_dsize;		/* data size (pages) */
+	size_t		u_ssize;		/* stack size (pages) */
+	unsigned long	start_code;		/* text starting address */
+	unsigned long	start_data;		/* data starting address */
+	unsigned long	start_stack;		/* stack starting address */
+	long int	signal;			/* signal causing core dump */
+	unsigned long	u_ar0;			/* help gdb find registers */
+	unsigned long	magic;			/* identifies a core file */
+	char		u_comm[32];		/* user command name */
+};
+
+#define NBPG			PAGE_SIZE
+#define UPAGES			1
+#define HOST_TEXT_START_ADDR	(u.start_code)
+#define HOST_DATA_START_ADDR	(u.start_data)
+#define HOST_STACK_END_ADDR	(u.start_stack + u.u_ssize * NBPG)
+#endif	/* _ASM_POWERPC_USER_H */
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
new file mode 100644
index 0000000..7714562
--- /dev/null
+++ b/arch/powerpc/include/asm/vas.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_VAS_H
+#define _ASM_POWERPC_VAS_H
+
+struct vas_window;
+
+/*
+ * Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
+ * (Local FIFO Size Register) of the VAS workbook.
+ */
+#define VAS_RX_FIFO_SIZE_MIN	(1 << 10)	/* 1KB */
+#define VAS_RX_FIFO_SIZE_MAX	(8 << 20)	/* 8MB */
+
+/*
+ * Threshold Control Mode: Have paste operation fail if the number of
+ * requests in receive FIFO exceeds a threshold.
+ *
+ * NOTE: No special error code yet if paste is rejected because of these
+ *	 limits. So users can't distinguish between this and other errors.
+ */
+#define VAS_THRESH_DISABLED		0
+#define VAS_THRESH_FIFO_GT_HALF_FULL	1
+#define VAS_THRESH_FIFO_GT_QTR_FULL	2
+#define VAS_THRESH_FIFO_GT_EIGHTH_FULL	3
+
+/*
+ * Get/Set bit fields
+ */
+#define GET_FIELD(m, v)                (((v) & (m)) >> MASK_LSH(m))
+#define MASK_LSH(m)            (__builtin_ffsl(m) - 1)
+#define SET_FIELD(m, v, val)   \
+		(((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m)))
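+
+/*
+ * For example (illustrative only), with m = 0x0000ff00: MASK_LSH(m) is 8,
+ * GET_FIELD(m, 0x1234) yields 0x12, and SET_FIELD(m, 0x1234, 0xab)
+ * yields 0xab34.
+ */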
+
+/*
+ * Co-processor Engine type.
+ */
+enum vas_cop_type {
+	VAS_COP_TYPE_FAULT,
+	VAS_COP_TYPE_842,
+	VAS_COP_TYPE_842_HIPRI,
+	VAS_COP_TYPE_GZIP,
+	VAS_COP_TYPE_GZIP_HIPRI,
+	VAS_COP_TYPE_FTW,
+	VAS_COP_TYPE_MAX,
+};
+
+/*
+ * Receive window attributes specified by the (in-kernel) owner of window.
+ */
+struct vas_rx_win_attr {
+	void *rx_fifo;
+	int rx_fifo_size;
+	int wcreds_max;
+
+	bool pin_win;
+	bool rej_no_credit;
+	bool tx_wcred_mode;
+	bool rx_wcred_mode;
+	bool tx_win_ord_mode;
+	bool rx_win_ord_mode;
+	bool data_stamp;
+	bool nx_win;
+	bool fault_win;
+	bool user_win;
+	bool notify_disable;
+	bool intr_disable;
+	bool notify_early;
+
+	int lnotify_lpid;
+	int lnotify_pid;
+	int lnotify_tid;
+	u32 pswid;
+
+	int tc_mode;
+};
+
+/*
+ * Window attributes specified by the in-kernel owner of a send window.
+ */
+struct vas_tx_win_attr {
+	enum vas_cop_type cop;
+	int wcreds_max;
+	int lpid;
+	int pidr;		/* hardware PID (from SPRN_PID) */
+	int pid;		/* linux process id */
+	int pswid;
+	int rsvd_txbuf_count;
+	int tc_mode;
+
+	bool user_win;
+	bool pin_win;
+	bool rej_no_credit;
+	bool rsvd_txbuf_enable;
+	bool tx_wcred_mode;
+	bool rx_wcred_mode;
+	bool tx_win_ord_mode;
+	bool rx_win_ord_mode;
+};
+
+/*
+ * Helper to map a chip id to VAS id.
+ * For POWER9, this is a 1:1 mapping. In the future this may be a 1:N
+ * mapping, in which case we will need to update this helper.
+ *
+ * Return the VAS id or -1 if no matching vasid is found.
+ */
+int chip_to_vas_id(int chipid);
+
+/*
+ * Helper to initialize receive window attributes to defaults for an
+ * NX window.
+ */
+void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop);
+
+/*
+ * Open a VAS receive window for the instance of VAS identified by @vasid.
+ * Use @attr to initialize the attributes of the window.
+ *
+ * Return a handle to the window or ERR_PTR() on error.
+ */
+struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
+				   struct vas_rx_win_attr *attr);
+
+/*
+ * Helper to initialize send window attributes to defaults for an NX window.
+ */
+extern void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr,
+			enum vas_cop_type cop);
+
+/*
+ * Open a VAS send window for the instance of VAS identified by @vasid
+ * and the co-processor type @cop. Use @attr to initialize attributes
+ * of the window.
+ *
+ * Note: The instance of VAS must already have an open receive window for
+ * the coprocessor type @cop.
+ *
+ * Return a handle to the send window or ERR_PTR() on error.
+ */
+struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+			struct vas_tx_win_attr *attr);
+
+/*
+ * Close the send or receive window identified by @win. For receive windows
+ * return -EAGAIN if there are active send windows attached to this receive
+ * window.
+ */
+int vas_win_close(struct vas_window *win);
+
+/*
+ * Copy the co-processor request block (CRB) @crb into the local L2 cache.
+ */
+int vas_copy_crb(void *crb, int offset);
+
+/*
+ * Paste a previously copied CRB (see vas_copy_crb()) from the L2 cache to
+ * the hardware address associated with the window @win. @re is expected/
+ * assumed to be true for NX windows.
+ */
+int vas_paste_crb(struct vas_window *win, int offset, bool re);
+
+/*
+ * Return a system-wide unique id for the VAS window @win.
+ */
+extern u32 vas_win_id(struct vas_window *win);
+
+/*
+ * Return the power bus paste address associated with @win so the caller
+ * can map that address into their address space.
+ */
+extern u64 vas_win_paste_addr(struct vas_window *win);
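+
+/*
+ * Illustrative lifecycle sketch (not part of the original header); error
+ * handling is elided and the vasid/crb values are placeholders:
+ *
+ *	struct vas_tx_win_attr txattr;
+ *	struct vas_window *txwin;
+ *
+ *	vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
+ *	txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
+ *	vas_copy_crb(crb, 0);
+ *	vas_paste_crb(txwin, 0, true);
+ *	...
+ *	vas_win_close(txwin);
+ */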
+#endif /* __ASM_POWERPC_VAS_H */
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
new file mode 100644
index 0000000..b5e1f8f
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PPC64_VDSO_H__
+#define __PPC64_VDSO_H__
+
+#ifdef __KERNEL__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE	0x0
+#define VDSO64_LBASE	0x0
+
+/* Default map addresses for 32bit vDSO */
+#define VDSO32_MBASE	0x100000
+
+#define VDSO_VERSION_STRING	LINUX_2.6.15
+
+/* Define if 64 bits VDSO has procedure descriptors */
+#undef VDS64_HAS_DESCRIPTORS
+
+#ifndef __ASSEMBLY__
+
+/* Offsets relative to thread->vdso_base */
+extern unsigned long vdso64_rt_sigtramp;
+extern unsigned long vdso32_sigtramp;
+extern unsigned long vdso32_rt_sigtramp;
+
+int vdso_getcpu_init(void);
+
+#else /* __ASSEMBLY__ */
+
+#ifdef __VDSO64__
+#ifdef VDS64_HAS_DESCRIPTORS
+#define V_FUNCTION_BEGIN(name)		\
+	.globl name;			\
+        .section ".opd","a";		\
+        .align 3;			\
+	name:				\
+	.quad .name,.TOC.@tocbase,0;	\
+	.previous;			\
+	.globl .name;			\
+	.type .name,@function; 		\
+	.name:				\
+
+#define V_FUNCTION_END(name)		\
+	.size .name,.-.name;
+
+#define V_LOCAL_FUNC(name) (.name)
+
+#else /* VDS64_HAS_DESCRIPTORS */
+
+#define V_FUNCTION_BEGIN(name)		\
+	.globl name;			\
+	name:				\
+
+#define V_FUNCTION_END(name)		\
+	.size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+
+#endif /* VDS64_HAS_DESCRIPTORS */
+#endif /* __VDSO64__ */
+
+#ifdef __VDSO32__
+
+#define V_FUNCTION_BEGIN(name)		\
+	.globl name;			\
+	.type name,@function; 		\
+	name:				\
+
+#define V_FUNCTION_END(name)		\
+	.size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+
+#endif /* __VDSO32__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __PPC64_VDSO_H__ */
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
new file mode 100644
index 0000000..1afe90a
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -0,0 +1,124 @@
+#ifndef _VDSO_DATAPAGE_H
+#define _VDSO_DATAPAGE_H
+#ifdef __KERNEL__
+
+/*
+ * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
+ * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * 		      IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+/*
+ * Note about this structure:
+ *
+ * This structure was historically called systemcfg and exposed to
+ * userland via /proc/ppc64/systemcfg. Unfortunately, this became an
+ * ABI issue as some proprietary software started relying on being able
+ * to mmap() it, thus we have to keep the base layout at least for a
+ * few kernel versions.
+ *
+ * However, since ppc32 doesn't suffer from this backward handicap,
+ * a simpler version of the data structure is used there with only the
+ * fields actually used by the vDSO.
+ *
+ */
+
+/*
+ * If the major version changes we are incompatible.
+ * Minor version changes are a hint.
+ */
+#define SYSTEMCFG_MAJOR 1
+#define SYSTEMCFG_MINOR 1
+
+#ifndef __ASSEMBLY__
+
+#include <linux/unistd.h>
+#include <linux/time.h>
+
+#define SYSCALL_MAP_SIZE      ((NR_syscalls + 31) / 32)
+
+/*
+ * So here is the ppc64 backward compatible version
+ */
+
+#ifdef CONFIG_PPC64
+
+struct vdso_data {
+	__u8  eye_catcher[16];		/* Eyecatcher: SYSTEMCFG:PPC64	0x00 */
+	struct {			/* Systemcfg version numbers	     */
+		__u32 major;		/* Major number			0x10 */
+		__u32 minor;		/* Minor number			0x14 */
+	} version;
+
+	/* Note about the platform flags: it now only contains the lpar
+	 * bit. The actual platform number is dead and buried
+	 */
+	__u32 platform;			/* Platform flags		0x18 */
+	__u32 processor;		/* Processor type		0x1C */
+	__u64 processorCount;		/* # of physical processors	0x20 */
+	__u64 physicalMemorySize;	/* Size of real memory(B)	0x28 */
+	__u64 tb_orig_stamp;		/* Timebase at boot		0x30 */
+	__u64 tb_ticks_per_sec;		/* Timebase ticks / sec		0x38 */
+	__u64 tb_to_xs;			/* Inverse of TB to 2^20	0x40 */
+	__u64 stamp_xsec;		/*				0x48 */
+	__u64 tb_update_count;		/* Timebase atomicity ctr	0x50 */
+	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x58 */
+	__u32 tz_dsttime;		/* Type of dst correction	0x5C */
+	__u32 dcache_size;		/* L1 d-cache size		0x60 */
+	__u32 dcache_line_size;		/* L1 d-cache line size		0x64 */
+	__u32 icache_size;		/* L1 i-cache size		0x68 */
+	__u32 icache_line_size;		/* L1 i-cache line size		0x6C */
+
+	/* those additional ones don't have to be located anywhere
+	 * special as they were not part of the original systemcfg
+	 */
+	__u32 dcache_block_size;		/* L1 d-cache block size     */
+	__u32 icache_block_size;		/* L1 i-cache block size     */
+	__u32 dcache_log_block_size;		/* L1 d-cache log block size */
+	__u32 icache_log_block_size;		/* L1 i-cache log block size */
+	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
+	__s32 wtom_clock_nsec;
+	struct timespec stamp_xtime;	/* xtime as at tb_orig_stamp */
+	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
+	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
+	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+};
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * And here is the simpler 32 bits version
+ */
+struct vdso_data {
+	__u64 tb_orig_stamp;		/* Timebase at boot		0x30 */
+	__u64 tb_ticks_per_sec;		/* Timebase ticks / sec		0x38 */
+	__u64 tb_to_xs;			/* Inverse of TB to 2^20	0x40 */
+	__u64 stamp_xsec;		/*				0x48 */
+	__u32 tb_update_count;		/* Timebase atomicity ctr	0x50 */
+	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x58 */
+	__u32 tz_dsttime;		/* Type of dst correction	0x5C */
+	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
+	__s32 wtom_clock_nsec;
+	struct timespec stamp_xtime;	/* xtime as at tb_orig_stamp */
+	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
+	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+	__u32 dcache_block_size;	/* L1 d-cache block size     */
+	__u32 icache_block_size;	/* L1 i-cache block size     */
+	__u32 dcache_log_block_size;	/* L1 d-cache log block size */
+	__u32 icache_log_block_size;	/* L1 i-cache log block size */
+};
+
+#endif /* CONFIG_PPC64 */
+
+extern struct vdso_data *vdso_data;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _VDSO_DATAPAGE_H */
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
new file mode 100644
index 0000000..fcf7216
--- /dev/null
+++ b/arch/powerpc/include/asm/vga.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
+/*
+ *	Access to VGA videoram
+ *
+ *	(c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+
+#include <asm/io.h>
+
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+
+#define VT_BUF_HAVE_RW
+/*
+ *  These are only needed for supporting VGA or MDA text mode, which use little
+ *  endian byte ordering.
+ *  In other cases, we can optimize by using native byte ordering and
+ *  <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+	*addr = cpu_to_le16(val);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+	return le16_to_cpu(*addr);
+}
+
+#define VT_BUF_HAVE_MEMSETW
+static inline void scr_memsetw(u16 *s, u16 v, unsigned int n)
+{
+	memset16(s, cpu_to_le16(v), n / 2);
+}
+
+#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
+#define scr_memcpyw	memcpy
+#define scr_memmovew	memmove
+
+#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
+
+#ifdef __powerpc64__
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s))
+#else
+#define VGA_MAP_MEM(x,s) (x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_VGA_H_ */
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
new file mode 100644
index 0000000..84286ec
--- /dev/null
+++ b/arch/powerpc/include/asm/vio.h
@@ -0,0 +1,173 @@
+/*
+ * IBM PowerPC Virtual I/O Infrastructure Support.
+ *
+ * Copyright (c) 2003 IBM Corp.
+ *  Dave Engebretsen engebret@us.ibm.com
+ *  Santiago Leon santil@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_VIO_H
+#define _ASM_POWERPC_VIO_H
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/scatterlist.h>
+
+#include <asm/hvcall.h>
+
+/*
+ * Architecture-specific constants for drivers to
+ * extract attributes of the device using vio_get_attribute()
+ */
+#define VETH_MAC_ADDR "local-mac-address"
+#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters"
+
+/* End architecture-specific constants */
+
+#define h_vio_signal(ua, mode) \
+  plpar_hcall_norets(H_VIO_SIGNAL, ua, mode)
+
+#define VIO_IRQ_DISABLE		0UL
+#define VIO_IRQ_ENABLE		1UL
+
+/*
+ * VIO CMO minimum entitlement for all devices and spare entitlement
+ */
+#define VIO_CMO_MIN_ENT 1562624
+
+extern struct bus_type vio_bus_type;
+
+struct iommu_table;
+
+/*
+ * Platform Facilities Option (PFO)-specific data
+ */
+
+/* Starting unit address for PFO devices on the VIO BUS */
+#define VIO_BASE_PFO_UA	0x50000000
+
+/**
+ * vio_pfo_op - PFO operation parameters
+ *
+ * @flags: h_call subfunctions and modifiers
+ * @in: Input data block logical real address
+ * @inlen: If non-negative, the length of the input data block.  If negative,
+ *	the length of the input data descriptor list in bytes.
+ * @out: Output data block logical real address
+ * @outlen: If non-negative, the length of the output data block.  If negative,
+ *	the length of the output data descriptor list in bytes.
+ * @csbcpb: Logical real address of the 4k naturally-aligned storage block
+ *	containing the CSB & optional FC field specific CPB
+ * @timeout: # of milliseconds to retry h_call, 0 for no timeout.
+ * @hcall_err: pointer to return the h_call return value, else NULL
+ */
+struct vio_pfo_op {
+	u64 flags;
+	s64 in;
+	s64 inlen;
+	s64 out;
+	s64 outlen;
+	u64 csbcpb;
+	void *done;
+	unsigned long handle;
+	unsigned int timeout;
+	long hcall_err;
+};
+
+/* End PFO specific data */
+
+enum vio_dev_family {
+	VDEVICE,	/* The OF node is a child of /vdevice */
+	PFO,		/* The OF node is a child of /ibm,platform-facilities */
+};
+
+/**
+ * vio_dev - This structure is used to describe virtual I/O devices.
+ *
+ * @desired: set from return of driver's get_desired_dma() function
+ * @entitled: bytes of IO data that have been reserved for this device.
+ * @allocated: bytes of IO data currently in use by the device.
+ * @allocs_failed: number of DMA failures due to insufficient entitlement.
+ */
+struct vio_dev {
+	const char *name;
+	const char *type;
+	uint32_t unit_address;
+	uint32_t resource_id;
+	unsigned int irq;
+	struct {
+		size_t desired;
+		size_t entitled;
+		size_t allocated;
+		atomic_t allocs_failed;
+	} cmo;
+	enum vio_dev_family family;
+	struct device dev;
+};
+
+struct vio_driver {
+	const char *name;
+	const struct vio_device_id *id_table;
+	int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
+	int (*remove)(struct vio_dev *dev);
+	/* A driver must have a get_desired_dma() function to
+	 * be loaded in a CMO environment if it uses DMA.
+	 */
+	unsigned long (*get_desired_dma)(struct vio_dev *dev);
+	const struct dev_pm_ops *pm;
+	struct device_driver driver;
+};
+
+extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+				 const char *mod_name);
+/*
+ * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define vio_register_driver(driver)		\
+	__vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+extern void vio_unregister_driver(struct vio_driver *drv);
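+
+/*
+ * Registration sketch (names hypothetical): a minimal driver bound to
+ * an illustrative device-tree type/compatible pair. get_desired_dma()
+ * is required for DMA-capable drivers on CMO-enabled platforms, per
+ * the comment in struct vio_driver.
+ *
+ *	static const struct vio_device_id demo_ids[] = {
+ *		{ "network", "IBM,l-lan" },
+ *		{ "", "" },
+ *	};
+ *
+ *	static int demo_probe(struct vio_dev *vdev,
+ *			      const struct vio_device_id *id)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static int demo_remove(struct vio_dev *vdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static unsigned long demo_get_desired_dma(struct vio_dev *vdev)
+ *	{
+ *		return 64 * 1024;
+ *	}
+ *
+ *	static struct vio_driver demo_driver = {
+ *		.name            = "demo",
+ *		.id_table        = demo_ids,
+ *		.probe           = demo_probe,
+ *		.remove          = demo_remove,
+ *		.get_desired_dma = demo_get_desired_dma,
+ *	};
+ *
+ * module_init() would then call vio_register_driver(&demo_driver) and
+ * module_exit() vio_unregister_driver(&demo_driver).
+ */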
+
+extern int vio_cmo_entitlement_update(size_t);
+extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
+
+extern void vio_unregister_device(struct vio_dev *dev);
+
+extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op);
+
+struct device_node;
+
+extern struct vio_dev *vio_register_device_node(
+		struct device_node *node_vdev);
+extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
+		int *length);
+#ifdef CONFIG_PPC_PSERIES
+extern struct vio_dev *vio_find_node(struct device_node *vnode);
+extern int vio_enable_interrupts(struct vio_dev *dev);
+extern int vio_disable_interrupts(struct vio_dev *dev);
+#else
+static inline int vio_enable_interrupts(struct vio_dev *dev)
+{
+	return 0;
+}
+#endif
+
+static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
+{
+	return container_of(drv, struct vio_driver, driver);
+}
+
+static inline struct vio_dev *to_vio_dev(struct device *dev)
+{
+	return container_of(dev, struct vio_dev, dev);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VIO_H */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..f3f4710
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,206 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+#include <asm/ppc_asm.h>
+
+#ifdef __BIG_ENDIAN__
+
+struct word_at_a_time {
+	const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+	unsigned long mask = (val & c->low_bits) + c->low_bits;
+	return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+	long leading_zero_bits;
+
+	asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+	return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+	unsigned long rhs = val | c->low_bits;
+	*data = rhs;
+	return (val + c->high_bits) & ~rhs;
+}
+
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+	return ~1ul << __fls(mask);
+}
+
+#else
+
+#ifdef CONFIG_64BIT
+
+/* unused */
+struct word_at_a_time {
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { }
+
+/* This will give us 0xff for a NULL char and 0x00 elsewhere */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long ret;
+	unsigned long zero = 0;
+
+	asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
+	*bits = ret;
+
+	return ret;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	unsigned long leading_zero_bits;
+	long trailing_zero_bit_mask;
+
+	asm("addi	%1,%2,-1\n\t"
+	    "andc	%1,%1,%2\n\t"
+	    "popcntd	%0,%1"
+		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+		: "b" (bits));
+
+	return leading_zero_bits;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return mask >> 3;
+}
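+
+/*
+ * Worked example (little-endian): for "abc" loaded as
+ * val = 0x0000000000636261, cmpb against zero sets
+ * bits = 0xffffffffff000000.  create_zero_mask() computes
+ * (bits - 1) & ~bits = 0x0000000000ffffff and popcntd returns 24, so
+ * find_zero() yields 24 >> 3 = 3: the zero byte is at index 3.
+ * zero_bytemask(24) = (1UL << 24) - 1 then covers the three bytes
+ * before it.
+ */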
+
+/* This assumes that we never ask for an all 1s bitmask */
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+	return (1UL << mask) - 1;
+}
+
+#else	/* 32-bit case */
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
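+
+/*
+ * Worked example: a zero in byte 2 reaches here as mask = 0x00ffff
+ * (from create_zero_mask() below), so
+ * (0x0ff0001 + 0x00ffff) >> 23 = 0x1000000 >> 23 = 2, and
+ * 2 & 0x00ffff = 2: byte index 2.
+ */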
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return count_masked_bytes(mask);
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* __BIG_ENDIAN__ */
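+
+/*
+ * Calling-convention sketch (mirrors in-tree users such as fs/dcache.c):
+ * scan a string a word at a time until a zero byte is found.
+ *
+ *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ *	unsigned long val, data, mask;
+ *	size_t len = 0;
+ *
+ *	for (;;) {
+ *		val = load_unaligned_zeropad(src + len);
+ *		if (has_zero(val, &data, &constants))
+ *			break;
+ *		len += sizeof(unsigned long);
+ *	}
+ *	mask = prep_zero_mask(val, data, &constants);
+ *	mask = create_zero_mask(mask);
+ *	len += find_zero(mask);
+ */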
+
+/*
+ * We use load_unaligned_zeropad() in a selftest, which builds a userspace
+ * program. Some linker scripts seem to discard the .fixup section, so allow
+ * the test code to use a different section name.
+ */
+#ifndef FIXUP_SECTION
+#define FIXUP_SECTION ".fixup"
+#endif
+
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, offset, tmp;
+
+	asm(
+	"1:	" PPC_LL "%[ret], 0(%[addr])\n"
+	"2:\n"
+	".section " FIXUP_SECTION ",\"ax\"\n"
+	"3:	"
+#ifdef __powerpc64__
+	"clrrdi		%[tmp], %[addr], 3\n\t"
+	"clrlsldi	%[offset], %[addr], 61, 3\n\t"
+	"ld		%[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+	"sld		%[ret], %[ret], %[offset]\n\t"
+#else
+	"srd		%[ret], %[ret], %[offset]\n\t"
+#endif
+#else
+	"clrrwi		%[tmp], %[addr], 2\n\t"
+	"clrlslwi	%[offset], %[addr], 30, 3\n\t"
+	"lwz		%[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+	"slw		%[ret], %[ret], %[offset]\n\t"
+#else
+	"srw		%[ret], %[ret], %[offset]\n\t"
+#endif
+#endif
+	"b	2b\n"
+	".previous\n"
+	EX_TABLE(1b, 3b)
+	: [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
+	: [addr] "b" (addr), "m" (*(unsigned long *)addr));
+
+	return ret;
+}
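+
+/*
+ * Usage sketch: word-at-a-time scanning may read up to
+ * sizeof(unsigned long) - 1 bytes past the end of a string, e.g.
+ *
+ *	unsigned long val = load_unaligned_zeropad(name + len);
+ *
+ * If that load faults on an unmapped page, the fixup above re-reads
+ * the aligned word containing the address and shifts it so the
+ * accessible bytes stay where a successful load would have put them,
+ * with the inaccessible ones replaced by zero padding.
+ */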
+
+#undef FIXUP_SECTION
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 0000000..8e903b3
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common definitions across all variants of ICP and ICS interrupt
+ * controllers.
+ */
+
+#ifndef _XICS_H
+#define _XICS_H
+
+#include <linux/interrupt.h>
+
+#define XICS_IPI		2
+#define XICS_IRQ_SPURIOUS	0
+
+/* Want a priority other than 0.  Various HW issues require this. */
+#define	DEFAULT_PRIORITY	5
+
+/*
+ * Mark IPIs as higher priority so we can take them inside interrupts
+ * FIXME: still true now?
+ */
+#define IPI_PRIORITY		4
+
+/* The least favored priority */
+#define LOWEST_PRIORITY		0xFF
+
+/* The number of priorities defined above */
+#define MAX_NUM_PRIORITIES	3
+
+/* Native ICP */
+#ifdef CONFIG_PPC_ICP_NATIVE
+extern int icp_native_init(void);
+extern void icp_native_flush_interrupt(void);
+extern void icp_native_cause_ipi_rm(int cpu);
+#else
+static inline int icp_native_init(void) { return -ENODEV; }
+#endif
+
+/* PAPR ICP */
+#ifdef CONFIG_PPC_ICP_HV
+extern int icp_hv_init(void);
+#else
+static inline int icp_hv_init(void) { return -ENODEV; }
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
+#else
+static inline int icp_opal_init(void) { return -ENODEV; }
+#endif
+
+/* ICP ops */
+struct icp_ops {
+	unsigned int (*get_irq)(void);
+	void (*eoi)(struct irq_data *d);
+	void (*set_priority)(unsigned char prio);
+	void (*teardown_cpu)(void);
+	void (*flush_ipi)(void);
+#ifdef CONFIG_SMP
+	void (*cause_ipi)(int cpu);
+	irq_handler_t ipi_action;
+#endif
+};
+
+extern const struct icp_ops *icp_ops;
+
+/* Native ICS */
+extern int ics_native_init(void);
+
+/* RTAS ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+extern int ics_rtas_init(void);
+#else
+static inline int ics_rtas_init(void) { return -ENODEV; }
+#endif
+
+/* HAL ICS */
+#ifdef CONFIG_PPC_POWERNV
+extern int ics_opal_init(void);
+#else
+static inline int ics_opal_init(void) { return -ENODEV; }
+#endif
+
+/* ICS instance, hooked up to chip_data of an irq */
+struct ics {
+	struct list_head link;
+	int (*map)(struct ics *ics, unsigned int virq);
+	void (*mask_unknown)(struct ics *ics, unsigned long vec);
+	long (*get_server)(struct ics *ics, unsigned long vec);
+	int (*host_match)(struct ics *ics, struct device_node *node);
+	char data[];
+};
+
+/* Commons */
+extern unsigned int xics_default_server;
+extern unsigned int xics_default_distrib_server;
+extern unsigned int xics_interrupt_server_size;
+extern struct irq_domain *xics_host;
+
+struct xics_cppr {
+	unsigned char stack[MAX_NUM_PRIORITIES];
+	int index;
+};
+
+DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
+
+static inline void xics_push_cppr(unsigned int vec)
+{
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
+		return;
+
+	if (vec == XICS_IPI)
+		os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
+	else
+		os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
+}
+
+static inline unsigned char xics_pop_cppr(void)
+{
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+	if (WARN_ON(os_cppr->index < 1))
+		return LOWEST_PRIORITY;
+
+	return os_cppr->stack[--os_cppr->index];
+}
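+
+/*
+ * Pairing sketch (illustrative): an ICP backend pushes the new priority
+ * when it fetches an interrupt and pops it on EOI, e.g.
+ *
+ *	vec = xirr & 0x00ffffff;
+ *	xics_push_cppr(vec);
+ *	... handle the interrupt ...
+ *	set_xirr((xics_pop_cppr() << 24) | hw_irq);
+ *
+ * where set_xirr() stands in for the backend-specific XIRR write.
+ */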
+
+static inline void xics_set_base_cppr(unsigned char cppr)
+{
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+	/* we only really want to set the priority when there's
+	 * just one cppr value on the stack
+	 */
+	WARN_ON(os_cppr->index != 0);
+
+	os_cppr->stack[0] = cppr;
+}
+
+static inline unsigned char xics_cppr_top(void)
+{
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+
+	return os_cppr->stack[os_cppr->index];
+}
+
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
+
+extern void xics_init(void);
+extern void xics_setup_cpu(void);
+extern void xics_update_irq_servers(void);
+extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
+extern void xics_mask_unknown_vec(unsigned int vec);
+extern irqreturn_t xics_ipi_dispatch(int cpu);
+extern void xics_smp_probe(void);
+extern void xics_register_ics(struct ics *ics);
+extern void xics_teardown_cpu(void);
+extern void xics_kexec_teardown_cpu(int secondary);
+extern void xics_migrate_irqs_away(void);
+extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
+#ifdef CONFIG_SMP
+extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+			       unsigned int strict_check);
+#else
+#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server)
+#endif
+
+#endif /* _XICS_H */
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
new file mode 100644
index 0000000..3192d7f
--- /dev/null
+++ b/arch/powerpc/include/asm/xilinx_intc.h
@@ -0,0 +1,20 @@
+/*
+ * Xilinx intc external definitions
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_XILINX_INTC_H
+#define _ASM_POWERPC_XILINX_INTC_H
+
+#ifdef __KERNEL__
+
+extern void __init xilinx_intc_init_tree(void);
+extern unsigned int xintc_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h
new file mode 100644
index 0000000..7a8275c
--- /dev/null
+++ b/arch/powerpc/include/asm/xilinx_pci.h
@@ -0,0 +1,21 @@
+/*
+ * Xilinx pci external definitions
+ *
+ * Copyright 2009 Roderick Colenbrander
+ * Copyright 2009 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef INCLUDE_XILINX_PCI
+#define INCLUDE_XILINX_PCI
+
+#ifdef CONFIG_XILINX_PCI
+extern void __init xilinx_pci_init(void);
+#else
+static inline void __init xilinx_pci_init(void) { return; }
+#endif
+
+#endif /* INCLUDE_XILINX_PCI */
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
new file mode 100644
index 0000000..6de989f
--- /dev/null
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_REGS_H
+#define _ASM_POWERPC_XIVE_REGS_H
+
+/*
+ * "magic" Event State Buffer (ESB) MMIO offsets.
+ *
+ * Each interrupt source has a 2-bit state machine called ESB
+ * which can be controlled by MMIO. It's made of 2 bits, P and
+ * Q. P indicates that an interrupt is pending (has been sent
+ * to a queue and is waiting for an EOI). Q indicates that the
+ * interrupt has been triggered while pending.
+ *
+ * This acts as a coalescing mechanism to guarantee that a given
+ * interrupt occurs at most once in a queue.
+ *
+ * When doing an EOI, the Q bit will indicate if the interrupt
+ * needs to be re-triggered.
+ *
+ * The following offsets into the ESB MMIO allow the PQ bits to be
+ * read or manipulated. They must be used with an 8-byte
+ * load instruction. They all return the previous state of the
+ * interrupt (atomically).
+ *
+ * Additionally, some ESB pages support doing an EOI via a
+ * store at 0 and some ESBs support doing a trigger via a
+ * separate trigger page.
+ */
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */
+
+#define XIVE_ESB_VAL_P		0x2
+#define XIVE_ESB_VAL_Q		0x1
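+
+/*
+ * Access sketch (assuming an ioremap'ed ESB page at "esb"): the PQ
+ * state machine is driven with MMIO loads at the offsets above, e.g.
+ *
+ *	u64 old = in_be64(esb + XIVE_ESB_SET_PQ_01);
+ *
+ * atomically moves the source to the masked state (P=0, Q=1) and
+ * returns the previous PQ bits, testable against XIVE_ESB_VAL_P
+ * and XIVE_ESB_VAL_Q.
+ */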
+
+/*
+ * Thread Management (aka "TM") registers
+ */
+
+/* TM register offsets */
+#define TM_QW0_USER		0x000 /* All rings */
+#define TM_QW1_OS		0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL		0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS		0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW             QW0 QW1 QW2 QW3 */
+#define TM_NSR			0x0  /*  +   +   -   +  */
+#define TM_CPPR			0x1  /*  -   +   -   +  */
+#define TM_IPB			0x2  /*  -   +   +   +  */
+#define TM_LSMFB		0x3  /*  -   +   +   +  */
+#define TM_ACK_CNT		0x4  /*  -   +   -   -  */
+#define TM_INC			0x5  /*  -   +   -   +  */
+#define TM_AGE			0x6  /*  -   +   -   +  */
+#define TM_PIPR			0x7  /*  -   +   -   +  */
+
+#define TM_WORD0		0x0
+#define TM_WORD1		0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2		0x8
+#define   TM_QW0W2_VU		PPC_BIT32(0)
+#define   TM_QW0W2_LOGIC_SERV	PPC_BITMASK32(1,31) // XX 2,31 ?
+#define   TM_QW1W2_VO		PPC_BIT32(0)
+#define   TM_QW1W2_OS_CAM	PPC_BITMASK32(8,31)
+#define   TM_QW2W2_VP		PPC_BIT32(0)
+#define   TM_QW2W2_POOL_CAM	PPC_BITMASK32(8,31)
+#define   TM_QW3W2_VT		PPC_BIT32(0)
+#define   TM_QW3W2_LP		PPC_BIT32(6)
+#define   TM_QW3W2_LE		PPC_BIT32(7)
+#define   TM_QW3W2_T		PPC_BIT32(31)
+
+/*
+ * In addition to normal loads to "peek" and writes (only when invalid)
+ * using 4 and 8 bytes accesses, the above registers support these
+ * "special" byte operations:
+ *
+ *   - Byte load from QW0[NSR] - User level NSR (EBB)
+ *   - Byte store to QW0[NSR] - User level NSR (EBB)
+ *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
+ *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
+ *                                    otherwise VT||0000000
+ *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
+ *
+ * Then we have all these "special" CI ops at these offsets that trigger
+ * all sorts of side effects:
+ */
+#define TM_SPC_ACK_EBB		0x800	/* Load8 ack EBB to reg*/
+#define TM_SPC_ACK_OS_REG	0x810	/* Load16 ack OS irq to reg */
+#define TM_SPC_PUSH_USR_CTX	0x808	/* Store32 Push/Validate user context */
+#define TM_SPC_PULL_USR_CTX	0x808	/* Load32 Pull/Invalidate user context */
+#define TM_SPC_SET_OS_PENDING	0x812	/* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX	0x818	/* Load32/Load64 Pull/Invalidate OS context to reg */
+#define TM_SPC_PULL_POOL_CTX	0x828	/* Load32/Load64 Pull/Invalidate Pool context to reg*/
+#define TM_SPC_ACK_HV_REG	0x830	/* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_USR_CTX_OL	0xc08	/* Store8 Pull/Inval usr ctx to odd line */
+#define TM_SPC_ACK_OS_EL	0xc10	/* Store8 ack OS irq to even line */
+#define TM_SPC_ACK_HV_POOL_EL	0xc20	/* Store8 ack HV evt pool to even line */
+#define TM_SPC_ACK_HV_EL	0xc30	/* Store8 ack HV irq to even line */
+/* XXX more... */
+
+/* NSR fields for the various QW ack types */
+#define TM_QW0_NSR_EB		PPC_BIT8(0)
+#define TM_QW1_NSR_EO		PPC_BIT8(0)
+#define TM_QW3_NSR_HE		PPC_BITMASK8(0,1)
+#define  TM_QW3_NSR_HE_NONE	0
+#define  TM_QW3_NSR_HE_POOL	1
+#define  TM_QW3_NSR_HE_PHYS	2
+#define  TM_QW3_NSR_HE_LSI	3
+#define TM_QW3_NSR_I		PPC_BIT8(2)
+#define TM_QW3_NSR_GRP_LVL	PPC_BITMASK8(3,7)
+
+#endif /* _ASM_POWERPC_XIVE_REGS_H */
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
new file mode 100644
index 0000000..3c704f5
--- /dev/null
+++ b/arch/powerpc/include/asm/xive.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_H
+#define _ASM_POWERPC_XIVE_H
+
+#define XIVE_INVALID_VP	0xffffffff
+
+#ifdef CONFIG_PPC_XIVE
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * This is a global MMIO region divided into 4 pages of varying access
+ * permissions, providing access to per-cpu interrupt management
+ * functions. It always identifies the CPU doing the access based
+ * on the PowerBus initiator ID, so we always access it via the
+ * same offset regardless of where the code is executing.
+ */
+extern void __iomem *xive_tima;
+
+/*
+ * Offset in the TM area of our current execution level (provided by
+ * the backend)
+ */
+extern u32 xive_tima_offset;
+
+/*
+ * Per-irq data (irq_get_handler_data for normal IRQs); for IPIs
+ * it is stored in the xive_cpu structure. For normal interrupts
+ * we also cache the current target CPU.
+ *
+ * This structure is set up by the backend for each interrupt.
+ */
+struct xive_irq_data {
+	u64 flags;
+	u64 eoi_page;
+	void __iomem *eoi_mmio;
+	u64 trig_page;
+	void __iomem *trig_mmio;
+	u32 esb_shift;
+	int src_chip;
+	u32 hw_irq;
+
+	/* Setup/used by frontend */
+	int target;
+	bool saved_p;
+};
+#define XIVE_IRQ_FLAG_STORE_EOI	0x01
+#define XIVE_IRQ_FLAG_LSI	0x02
+#define XIVE_IRQ_FLAG_SHIFT_BUG	0x04
+#define XIVE_IRQ_FLAG_MASK_FW	0x08
+#define XIVE_IRQ_FLAG_EOI_FW	0x10
+#define XIVE_IRQ_FLAG_H_INT_ESB	0x20
+
+/* Special flag set by KVM for escalation interrupts */
+#define XIVE_IRQ_NO_EOI		0x80
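+
+/*
+ * EOI sketch (illustrative of how a frontend consumes these flags):
+ *
+ *	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ *		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
+ *	else
+ *		in_be64(xd->eoi_mmio + XIVE_ESB_LOAD_EOI);
+ *
+ * using the "magic" ESB offsets from asm/xive-regs.h.
+ */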
+
+#define XIVE_INVALID_CHIP_ID	-1
+
+/* A queue tracking structure in a CPU */
+struct xive_q {
+	__be32 			*qpage;
+	u32			msk;
+	u32			idx;
+	u32			toggle;
+	u64			eoi_phys;
+	u32			esc_irq;
+	atomic_t		count;
+	atomic_t		pending_count;
+};
+
+/* Global enable flags for the XIVE support */
+extern bool __xive_enabled;
+
+static inline bool xive_enabled(void) { return __xive_enabled; }
+
+extern bool xive_spapr_init(void);
+extern bool xive_native_init(void);
+extern void xive_smp_probe(void);
+extern int  xive_smp_prepare_cpu(unsigned int cpu);
+extern void xive_smp_setup_cpu(void);
+extern void xive_smp_disable_cpu(void);
+extern void xive_teardown_cpu(void);
+extern void xive_shutdown(void);
+extern void xive_flush_interrupt(void);
+
+/* xmon hook */
+extern void xmon_xive_do_dump(int cpu);
+
+/* APIs used by KVM */
+extern u32 xive_native_default_eq_shift(void);
+extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
+extern void xive_native_free_vp_block(u32 vp_base);
+extern int xive_native_populate_irq_data(u32 hw_irq,
+					 struct xive_irq_data *data);
+extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
+extern u32 xive_native_alloc_irq(void);
+extern void xive_native_free_irq(u32 irq);
+extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+				       __be32 *qpage, u32 order, bool can_escalate);
+extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+extern void xive_native_sync_source(u32 hw_irq);
+extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+extern bool xive_native_has_single_escalation(void);
+
+#else
+
+static inline bool xive_enabled(void) { return false; }
+
+static inline bool xive_spapr_init(void) { return false; }
+static inline bool xive_native_init(void) { return false; }
+static inline void xive_smp_probe(void) { }
+static inline int  xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
+static inline void xive_smp_setup_cpu(void) { }
+static inline void xive_smp_disable_cpu(void) { }
+static inline void xive_kexec_teardown_cpu(int secondary) { }
+static inline void xive_shutdown(void) { }
+static inline void xive_flush_interrupt(void) { }
+
+static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
+static inline void xive_native_free_vp_block(u32 vp_base) { }
+
+#endif
+
+#endif /* _ASM_POWERPC_XIVE_H */
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
new file mode 100644
index 0000000..30ff69b
--- /dev/null
+++ b/arch/powerpc/include/asm/xmon.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_POWERPC_XMON_H
+#define __ASM_POWERPC_XMON_H
+
+/*
+ * Copyright (C) 2006 IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/irqreturn.h>
+
+#ifdef CONFIG_XMON
+extern void xmon_setup(void);
+extern void xmon_register_spus(struct list_head *list);
+struct pt_regs;
+extern int xmon(struct pt_regs *excp);
+extern irqreturn_t xmon_irq(int, void *);
+#else
+static inline void xmon_setup(void) { }
+static inline void xmon_register_spus(struct list_head *list) { }
+#endif
+
+#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
+extern int cpus_are_in_xmon(void);
+#endif
+
+extern __printf(1, 2) void xmon_printf(const char *format, ...);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_XMON_H */
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
new file mode 100644
index 0000000..7d6dc50
--- /dev/null
+++ b/arch/powerpc/include/asm/xor.h
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#ifndef _ASM_POWERPC_XOR_H
+#define _ASM_POWERPC_XOR_H
+
+#ifdef CONFIG_ALTIVEC
+
+#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/xor_altivec.h>
+
+static struct xor_block_template xor_block_altivec = {
+	.name = "altivec",
+	.do_2 = xor_altivec_2,
+	.do_3 = xor_altivec_3,
+	.do_4 = xor_altivec_4,
+	.do_5 = xor_altivec_5,
+};
+
+#define XOR_SPEED_ALTIVEC()				\
+	do {						\
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))	\
+			xor_speed(&xor_block_altivec);	\
+	} while (0)
+#else
+#define XOR_SPEED_ALTIVEC()
+#endif
+
+/* Also try the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES				\
+do {							\
+	xor_speed(&xor_block_8regs);			\
+	xor_speed(&xor_block_8regs_p);			\
+	xor_speed(&xor_block_32regs);			\
+	xor_speed(&xor_block_32regs_p);			\
+	XOR_SPEED_ALTIVEC();				\
+} while (0)
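+
+/*
+ * crypto/xor.c expands XOR_TRY_TEMPLATES during calibration,
+ * benchmarking each candidate with xor_speed() and keeping the fastest
+ * template for RAID checksumming; the AltiVec entry only enters the
+ * contest when CPU_FTR_ALTIVEC is set.
+ */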
+
+#endif /* _ASM_POWERPC_XOR_H */
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
new file mode 100644
index 0000000..6ca9235
--- /dev/null
+++ b/arch/powerpc/include/asm/xor_altivec.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_XOR_ALTIVEC_H
+#define _ASM_POWERPC_XOR_ALTIVEC_H
+
+#ifdef CONFIG_ALTIVEC
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in);
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in);
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in);
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in, unsigned long *v5_in);
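+
+/*
+ * Semantics sketch: each helper XORs the later sources into the first
+ * buffer over "bytes" bytes, e.g.
+ *
+ *	xor_altivec_2(PAGE_SIZE, p1, p2);
+ *
+ * leaves p1[i] ^= p2[i] across the whole page.
+ */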
+
+#endif
+#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */