Linux kernel v4.19.13 snapshot (drivers/gpu/drm/radeon).
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 0000000..403eb3a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
+mkregtable
+*_reg_safe.h
+
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 0000000..9909f5c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,7 @@
+config DRM_RADEON_USERPTR
+	bool "Always enable userptr support"
+	depends on DRM_RADEON
+	select MMU_NOTIFIER
+	help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected, to enable full userptr support.
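The select pulls the MMU-notifier core into the build whenever this option is set; the Makefile below then compiles radeon_mn.o only under CONFIG_MMU_NOTIFIER (see the radeon-$(CONFIG_MMU_NOTIFIER) line). A minimal, hedged sketch of the usual compile-time gating pattern; the helper name is illustrative, not part of the radeon API:

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustrative helper, not radeon's: IS_ENABLED() folds to a constant
 * 0 or 1 at compile time, so code can branch on the config symbol
 * without scattering #ifdef blocks. */
static inline bool example_userptr_supported(void)
{
        return IS_ENABLED(CONFIG_MMU_NOTIFIER);
}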
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 0000000..92ccd7a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Idrivers/gpu/drm/amd/include
+
+hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h \
+	rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h \
+	r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+
+quiet_cmd_mkregtable = MKREGTABLE $@
+      cmd_mkregtable = $(obj)/mkregtable $< > $@
+
+$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
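Each rule above feeds a plain-text register list from reg_srcs/ through the mkregtable host tool and captures its stdout as a *_reg_safe.h header. A hedged sketch of the expected shape (the array-name pattern follows what the command-stream checker code references; the values below are placeholders, not real mkregtable output):

/* Approximate shape of a generated r100_reg_safe.h: a bitmask over the
 * register space that the command-stream checker consults when validating
 * userspace submissions.  Placeholder values only. */
static const unsigned r100_reg_safe_bm[] = {
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, /* ... */
};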
+$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
+
+$(obj)/r200.o: $(obj)/r200_reg_safe.h
+
+$(obj)/rv515.o: $(obj)/rv515_reg_safe.h
+
+$(obj)/r300.o: $(obj)/r300_reg_safe.h
+
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
+$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
+
+radeon-y := radeon_drv.o
+
+# add KMS driver
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
+	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+	atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+	radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
+	r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
+	radeon_pm.o atombios_dp.o r600_hdmi.o dce3_1_afmt.o \
+	evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
+	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+	si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
+	r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
+	rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
+	trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
+	ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
+	radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o
+
+radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
+
+# add async DMA block
+radeon-y += \
+	r600_dma.o \
+	rv770_dma.o \
+	evergreen_dma.o \
+	ni_dma.o \
+	si_dma.o \
+	cik_sdma.o \
+
+# add UVD block
+radeon-y += \
+	radeon_uvd.o \
+	uvd_v1_0.o \
+	uvd_v2_2.o \
+	uvd_v3_1.o \
+	uvd_v4_2.o
+
+# add VCE block
+radeon-y += \
+	radeon_vce.o \
+	vce_v1_0.o \
+	vce_v2_0.o
+
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+radeon-$(CONFIG_ACPI) += radeon_acpi.o
+
+obj-$(CONFIG_DRM_RADEON) += radeon.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 0000000..0619269
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,736 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.  
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
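ObjectID.h itself only defines macros, but the pack(1) guard mirrors the other AtomBIOS headers, where structures must match the byte-exact layouts stored in the video BIOS. A small hedged illustration of the effect; the struct is invented for the example:

#pragma pack(1)
struct example_packed {
        unsigned char  tag;     /* offset 0 */
        unsigned short size;    /* offset 1 packed; offset 2 if padded */
};
#pragma pack()

/* sizeof(struct example_packed) is 3 when packed; typically 4 otherwise. */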
+/****************************************************/
+/* Graphics Object Type Definition                  */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE                    0x0
+#define GRAPH_OBJECT_TYPE_GPU                     0x1
+#define GRAPH_OBJECT_TYPE_ENCODER                 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
+#define GRAPH_OBJECT_TYPE_ROUTER                  0x4
+/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6  
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
+
+/****************************************************/
+/* Encoder Object ID Definition                     */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE                    0x00 
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B                  0x08
+#define ENCODER_OBJECT_ID_CH7303                  0x09
+#define ENCODER_OBJECT_ID_CH7301                  0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+#define ENCODER_OBJECT_ID_TITFP513                0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623                  0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
+#define ENCODER_OBJECT_ID_HDMI_ANX9805            0x26
+
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0x17  /* External TMDS (dual link, no HDCP) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+#define ENCODER_OBJECT_ID_VT1625                  0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801               0x1C
+#define ENCODER_OBJECT_ID_DP_DP501                0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+#define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
+#define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+
+/****************************************************/
+/* Connector Object ID Definition                   */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE                  0x00 
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D       0x04
+#define CONNECTOR_OBJECT_ID_VGA                   0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE             0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+#define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART                 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+#define CONNECTOR_OBJECT_ID_LVDS                  0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN              0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR        0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition                      */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE                     0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL        0x01
+
+/****************************************************/
+/* Generic Object ID Definition                     */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+/* This object can show up in the Misc Object table; it follows the
+ * ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD
+ * for the stereo pin. */
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition               */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1                     0x01
+#define GRAPH_OBJECT_ENUM_ID2                     0x02
+#define GRAPH_OBJECT_ENUM_ID3                     0x03
+#define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
+
+/****************************************************/
+/* Graphics Object ID Bit definition                */
+/****************************************************/
+#define OBJECT_ID_MASK                            0x00FF
+#define ENUM_ID_MASK                              0x0700
+#define RESERVED1_ID_MASK                         0x0800
+#define OBJECT_TYPE_MASK                          0x7000
+#define RESERVED2_ID_MASK                         0x8000
+
+#define OBJECT_ID_SHIFT                           0x00
+#define ENUM_ID_SHIFT                             0x08
+#define OBJECT_TYPE_SHIFT                         0x0C
+
+
+/****************************************************/
+/* Graphics Object family definition                */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS      */
+/****************************************************/
+#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
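Taken together, an object ID is a 16-bit value: object ID in bits 0-7, enum ID in bits 8-10, object type in bits 12-14, with bits 11 and 15 reserved. GPU_ENUM_ID1 above therefore evaluates to (0x1 << 12) | (0x01 << 8) = 0x1100. A hedged decoding sketch using only the masks and shifts just defined; the helper name is made up:

/* Illustrative decode of a composed graphics object ID. */
static inline void example_decode_object_id(unsigned short id)
{
        unsigned char object_id = (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;     /* bits 0-7  */
        unsigned char enum_id = (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;           /* bits 8-10 */
        unsigned char obj_type = (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;  /* bits 12-14 */

        (void)object_id; (void)enum_id; (void)obj_type; /* example only */
}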
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS  */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+#define ENCODER_SIL170B_ENUM_ID1              0x2108  
+#define ENCODER_CH7303_ENUM_ID1               0x2109
+#define ENCODER_CH7301_ENUM_ID1               0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1       0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1       0x210D
+#define ENCODER_TITFP513_ENUM_ID1             0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1       0x210F
+#define ENCODER_VT1623_ENUM_ID1               0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1          0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1        0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
+#define ENCODER_SI178_ENUM_ID1                   0x2117 
+#define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+#define ENCODER_VT1625_ENUM_ID1                  0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1             0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1       0x211C
+#define ENCODER_DP_DP501_ENUM_ID1                0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
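The macro-built values reproduce the hardcoded IDs in the commented-out legacy table above: internal LVDS packs to (0x2 << 12) | (0x1 << 8) | 0x01 = 0x2101, matching the legacy ENCODER_INTERNAL_LVDS_ENUM_ID1 value. A hedged compile-time check, not part of the original header, that one could use to verify this:

/* Sanity sketch only: confirm the composed value equals the legacy ID. */
_Static_assert(ENCODER_INTERNAL_LVDS_ENUM_ID1 == 0x2101,
               "internal LVDS ENUM_ID1 should match the legacy 0x2101");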
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
+
+#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
+#define ENCODER_VCE_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_ANX9805_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1        0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1          0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1        0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1          0x3104
+#define CONNECTOR_VGA_ENUM_ID1                      0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1                0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1                   0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1                    0x3108
+#define CONNECTOR_D_CONNECTORE_ENUM_ID1             0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1                 0x310A
+#define CONNECTOR_SCART_ENUM_ID1                    0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1              0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1              0x310D
+#define CONNECTOR_LVDS_ENUM_ID1                     0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS   */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS  */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS         */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif  /* _OBJECTID_H */
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 0000000..e8fae5c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+    return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+    return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+    return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif
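These accessors assemble little-endian values one byte at a time from the raw BIOS image, so they are safe on any host alignment and endianness; the U*/CU* macros additionally assume a parser context named ctx that carries the bios pointer. A small hedged usage sketch against a made-up buffer:

/* Illustrative only: read a 16-bit little-endian word at offset 2 of a
 * fake BIOS image using the raw accessor (not the ctx-based macros). */
static uint16_t example_read_word(void)
{
        unsigned char fake_bios[4] = { 0x00, 0x00, 0x34, 0x12 };

        return get_u16(fake_bios, 2);   /* 0x34 | (0x12 << 8) == 0x1234 */
}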
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 0000000..6f907a5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 0000000..1125b86
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 0000000..e55cbee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1420 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+#include "radeon.h"
+
+#define ATOM_COND_ABOVE		0
+#define ATOM_COND_ABOVEOREQUAL	1
+#define ATOM_COND_ALWAYS	2
+#define ATOM_COND_BELOW		3
+#define ATOM_COND_BELOWOREQUAL	4
+#define ATOM_COND_EQUAL		5
+#define ATOM_COND_NOTEQUAL	6
+
+#define ATOM_PORT_ATI	0
+#define ATOM_PORT_PCI	1
+#define ATOM_PORT_SYSIO	2
+
+#define ATOM_UNIT_MICROSEC	0
+#define ATOM_UNIT_MILLISEC	1
+
+#define PLL_INDEX	2
+#define PLL_DATA	3
+
+typedef struct {
+	struct atom_context *ctx;
+	uint32_t *ps, *ws;
+	int ps_shift;
+	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
+} atom_exec_context;
+
+int atom_debug = 0;
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+
+static uint32_t atom_arg_mask[8] = {
+	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+	/* translate destination alignment field to the source alignment encoding */
+	{0, 0, 0, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+	while (n--)
+		printk("   ");
+}
+
+#define DEBUG(...) do { if (atom_debug) printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do { if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
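+/*
+ * Indirect IO: execute one of the small IIO opcode programs embedded in the
+ * BIOS (indexed by port id in atom_index_iio() below).  Each program
+ * describes how to read a register and extract/insert bitfields from the
+ * caller's index/data/attribute words; e.g. ATOM_IIO_MOVE_INDEX takes a
+ * field width, a source shift and a destination shift, and copies that many
+ * bits of 'index' into the accumulator 'temp'.
+ */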
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+				 uint32_t index, uint32_t data)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	uint32_t temp = 0xCDCDCDCD;
+
+	while (1)
+		switch (CU8(base)) {
+		case ATOM_IIO_NOP:
+			base++;
+			break;
+		case ATOM_IIO_READ:
+			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			base += 3;
+			break;
+		case ATOM_IIO_WRITE:
+			if (rdev->family == CHIP_RV515)
+				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+			base += 3;
+			break;
+		case ATOM_IIO_CLEAR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+				  CU8(base + 2));
+			base += 3;
+			break;
+		case ATOM_IIO_SET:
+			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+				CU8(base + 2);
+			base += 3;
+			break;
+		case ATOM_IIO_MOVE_INDEX:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+				  CU8(base + 3));
+			temp |= ((index >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+				CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_DATA:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+				  CU8(base + 3));
+			temp |= ((data >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+				CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_ATTR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+				  CU8(base + 3));
+			temp |= ((ctx->io_attr >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+				CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_END:
+			return temp;
+		default:
+			pr_info("Unknown IIO opcode\n");
+			return 0;
+		}
+}
+
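+/*
+ * Operands are encoded as an attribute byte followed by an index or
+ * immediate at *ptr: bits 2:0 of the attribute select the argument type
+ * (ATOM_ARG_*), bits 5:3 the alignment of the value within its 32-bit
+ * container (ATOM_SRC_*).  *ptr is advanced past whatever was consumed.
+ */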
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+				 int *ptr, uint32_t *saved, int print)
+{
+	uint32_t idx, val = 0xCDCDCDCD, align, arg;
+	struct atom_context *gctx = ctx->ctx;
+	arg = attr & 7;
+	align = (attr >> 3) & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print)
+			DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			val = gctx->card->reg_read(gctx->card, idx);
+			break;
+		case ATOM_IO_PCI:
+			pr_info("PCI registers are not implemented\n");
+			return 0;
+		case ATOM_IO_SYSIO:
+			pr_info("SYSIO registers are not implemented\n");
+			return 0;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				pr_info("Bad IO mode\n");
+				return 0;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				pr_info("Undefined indirect IO read method %d\n",
+					gctx->io_mode & 0x7F);
+				return 0;
+			}
+			val = atom_iio_execute(gctx,
+					       gctx->iio[gctx->io_mode & 0x7F],
+					       idx, 0);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+		if (print)
+			DEBUG("PS[0x%02X,0x%04X]", idx, val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			val = gctx->divmul[0];
+			break;
+		case ATOM_WS_REMAINDER:
+			val = gctx->divmul[1];
+			break;
+		case ATOM_WS_DATAPTR:
+			val = gctx->data_block;
+			break;
+		case ATOM_WS_SHIFT:
+			val = gctx->shift;
+			break;
+		case ATOM_WS_OR_MASK:
+			val = 1 << gctx->shift;
+			break;
+		case ATOM_WS_AND_MASK:
+			val = ~(1 << gctx->shift);
+			break;
+		case ATOM_WS_FB_WINDOW:
+			val = gctx->fb_base;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			val = gctx->io_attr;
+			break;
+		case ATOM_WS_REGPTR:
+			val = gctx->reg_block;
+			break;
+		default:
+			val = ctx->ws[idx];
+		}
+		break;
+	case ATOM_ARG_ID:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print) {
+			if (gctx->data_block)
+				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+			else
+				DEBUG("ID[0x%04X]", idx);
+		}
+		val = U32(idx + gctx->data_block);
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+			val = 0;
+		} else
+			val = gctx->scratch[(gctx->fb_base / 4) + idx];
+		if (print)
+			DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			val = U32(*ptr);
+			(*ptr) += 4;
+			if (print)
+				DEBUG("IMM 0x%08X\n", val);
+			return val;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			val = U16(*ptr);
+			(*ptr) += 2;
+			if (print)
+				DEBUG("IMM 0x%04X\n", val);
+			return val;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			val = U8(*ptr);
+			(*ptr)++;
+			if (print)
+				DEBUG("IMM 0x%02X\n", val);
+			return val;
+		}
+		return 0;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("PLL[0x%02X]", idx);
+		val = gctx->card->pll_read(gctx->card, idx);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("MC[0x%02X]", idx);
+		val = gctx->card->mc_read(gctx->card, idx);
+		break;
+	}
+	if (saved)
+		*saved = val;
+	val &= atom_arg_mask[align];
+	val >>= atom_arg_shift[align];
+	if (print)
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			DEBUG(".[31:0] -> 0x%08X\n", val);
+			break;
+		case ATOM_SRC_WORD0:
+			DEBUG(".[15:0] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD8:
+			DEBUG(".[23:8] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD16:
+			DEBUG(".[31:16] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_BYTE0:
+			DEBUG(".[7:0] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE8:
+			DEBUG(".[15:8] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE16:
+			DEBUG(".[23:16] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE24:
+			DEBUG(".[31:24] -> 0x%02X\n", val);
+			break;
+		}
+	return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+	case ATOM_ARG_ID:
+		(*ptr) += 2;
+		break;
+	case ATOM_ARG_PLL:
+	case ATOM_ARG_MC:
+	case ATOM_ARG_PS:
+	case ATOM_ARG_WS:
+	case ATOM_ARG_FB:
+		(*ptr)++;
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			(*ptr) += 4;
+			return;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			(*ptr) += 2;
+			return;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			(*ptr)++;
+			return;
+		}
+		return;
+	}
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+	uint32_t val = 0xCDCDCDCD;
+
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		val = U32(*ptr);
+		(*ptr) += 4;
+		break;
+	case ATOM_SRC_WORD0:
+	case ATOM_SRC_WORD8:
+	case ATOM_SRC_WORD16:
+		val = U16(*ptr);
+		(*ptr) += 2;
+		break;
+	case ATOM_SRC_BYTE0:
+	case ATOM_SRC_BYTE8:
+	case ATOM_SRC_BYTE16:
+	case ATOM_SRC_BYTE24:
+		val = U8(*ptr);
+		(*ptr)++;
+		break;
+	}
+	return val;
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			     int *ptr, uint32_t *saved, int print)
+{
+	return atom_get_src_int(ctx,
+				arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+				ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+	atom_skip_src_int(ctx,
+			  arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+			  ptr);
+}
+
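+/*
+ * Write back a destination operand.  'saved' is the full value previously
+ * fetched by atom_get_dst(), so sub-dword destinations are merged into the
+ * unmodified bits before the register/PS/WS/FB slot is rewritten
+ * (read-modify-write).
+ */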
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			 int *ptr, uint32_t val, uint32_t saved)
+{
+	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	uint32_t old_val = val, idx;
+	struct atom_context *gctx = ctx->ctx;
+	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+	val <<= atom_arg_shift[align];
+	val &= atom_arg_mask[align];
+	saved &= ~atom_arg_mask[align];
+	val |= saved;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			if (idx == 0)
+				gctx->card->reg_write(gctx->card, idx,
+						      val << 2);
+			else
+				gctx->card->reg_write(gctx->card, idx, val);
+			break;
+		case ATOM_IO_PCI:
+			pr_info("PCI registers are not implemented\n");
+			return;
+		case ATOM_IO_SYSIO:
+			pr_info("SYSIO registers are not implemented\n");
+			return;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				pr_info("Bad IO mode\n");
+				return;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				pr_info("Undefined indirect IO write method %d\n",
+					gctx->io_mode & 0x7F);
+				return;
+			}
+			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
+					 idx, val);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PS[0x%02X]", idx);
+		ctx->ps[idx] = cpu_to_le32(val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			gctx->divmul[0] = val;
+			break;
+		case ATOM_WS_REMAINDER:
+			gctx->divmul[1] = val;
+			break;
+		case ATOM_WS_DATAPTR:
+			gctx->data_block = val;
+			break;
+		case ATOM_WS_SHIFT:
+			gctx->shift = val;
+			break;
+		case ATOM_WS_OR_MASK:
+		case ATOM_WS_AND_MASK:
+			break;
+		case ATOM_WS_FB_WINDOW:
+			gctx->fb_base = val;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			gctx->io_attr = val;
+			break;
+		case ATOM_WS_REGPTR:
+			gctx->reg_block = val;
+			break;
+		default:
+			ctx->ws[idx] = val;
+		}
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+		} else
+			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+		DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PLL[0x%02X]", idx);
+		gctx->card->pll_write(gctx->card, idx, val);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("MC[0x%02X]", idx);
+		gctx->card->mc_write(gctx->card, idx, val);
+		return;
+	}
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		DEBUG(".[31:0] <- 0x%08X\n", old_val);
+		break;
+	case ATOM_SRC_WORD0:
+		DEBUG(".[15:0] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD8:
+		DEBUG(".[23:8] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD16:
+		DEBUG(".[31:16] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE0:
+		DEBUG(".[7:0] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE8:
+		DEBUG(".[15:8] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE16:
+		DEBUG(".[23:16] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE24:
+		DEBUG(".[31:24] <- 0x%02X\n", old_val);
+		break;
+	}
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst += src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8((*ptr)++);
+	int r = 0;
+
+	if (idx < ATOM_TABLE_NAMES_CNT)
+		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
+	else
+		SDEBUG("   table: %d\n", idx);
+	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t saved;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = (dst == src);
+	ctx->ctx->cs_above = (dst > src);
+	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+	       ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+	unsigned count = U8((*ptr)++);
+	SDEBUG("   count: %d\n", count);
+	if (arg == ATOM_UNIT_MICROSEC)
+		udelay(count);
+	else if (!drm_can_sleep())
+		mdelay(count);
+	else
+		msleep(count);
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		ctx->ctx->divmul[0] = dst / src;
+		ctx->ctx->divmul[1] = dst % src;
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* functionally, a nop */
+}
+
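+/*
+ * Conditional jump.  The interpreter remembers the last jump target and the
+ * time it started jumping there; if the same jump keeps firing for more
+ * than 5 seconds it assumes the BIOS is stuck in an infinite loop and
+ * aborts the table.
+ */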
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
+	(*ptr) += 2;
+	switch (arg) {
+	case ATOM_COND_ABOVE:
+		execute = ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_ABOVEOREQUAL:
+		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_ALWAYS:
+		execute = 1;
+		break;
+	case ATOM_COND_BELOW:
+		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+		break;
+	case ATOM_COND_BELOWOREQUAL:
+		execute = !ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_EQUAL:
+		execute = ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_NOTEQUAL:
+		execute = !ctx->ctx->cs_equal;
+		break;
+	}
+	if (arg != ATOM_COND_ALWAYS)
+		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
+	SDEBUG("   target: 0x%04X\n", target);
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if (jiffies_to_msecs(cjiffies) > 5000) {
+					DRM_ERROR("atombios stuck in loop for more than 5 seconds, aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrapped around; just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
+		*ptr = ctx->start + target;
+	}
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, mask, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	SDEBUG("   mask: 0x%08x", mask);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, saved;
+	int dptr = *ptr;
+	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	else {
+		atom_skip_dst(ctx, arg, attr, ptr);
+		saved = 0xCDCDCDCD;
+	}
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t val = U8((*ptr)++);
+	SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+	pr_info("unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	pr_info("unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	pr_info("unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8(*ptr);
+	(*ptr)++;
+	SDEBUG("   block: %d\n", idx);
+	if (!idx)
+		ctx->ctx->data_block = 0;
+	else if (idx == 255)
+		ctx->ctx->data_block = ctx->start;
+	else
+		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	SDEBUG("   fb_base: ");
+	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int port;
+	switch (arg) {
+	case ATOM_PORT_ATI:
+		port = U16(*ptr);
+		if (port < ATOM_IO_NAMES_CNT)
+			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
+		else
+			SDEBUG("   port: %d\n", port);
+		if (!port)
+			ctx->ctx->io_mode = ATOM_IO_MM;
+		else
+			ctx->ctx->io_mode = ATOM_IO_IIO | port;
+		(*ptr) += 2;
+		break;
+	case ATOM_PORT_PCI:
+		ctx->ctx->io_mode = ATOM_IO_PCI;
+		(*ptr)++;
+		break;
+	case ATOM_PORT_SYSIO:
+		ctx->ctx->io_mode = ATOM_IO_SYSIO;
+		(*ptr)++;
+		break;
+	}
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	ctx->ctx->reg_block = U16(*ptr);
+	(*ptr) += 2;
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst -= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, val, target;
+	SDEBUG("   switch: ");
+	src = atom_get_src(ctx, attr, ptr);
+	while (U16(*ptr) != ATOM_CASE_END)
+		if (U8(*ptr) == ATOM_CASE_MAGIC) {
+			(*ptr)++;
+			SDEBUG("   case: ");
+			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
+			target = U16(*ptr);
+			if (val == src) {
+				SDEBUG("   target: %04X\n", target);
+				*ptr = ctx->start + target;
+				return;
+			}
+			(*ptr) += 2;
+		} else {
+			pr_info("Bad case\n");
+			return;
+		}
+	(*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = ((dst & src) == 0);
+	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst ^= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+	pr_info("unimplemented!\n");
+}
+
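+/*
+ * Dispatch table indexed by the opcode byte itself.  Opcode 0 is reserved
+ * (it terminates execution in atom_execute_table_locked()); each handler is
+ * registered together with the argument type, port or jump condition it
+ * services.
+ */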
+static struct {
+	void (*func)(atom_exec_context *, int *, int);
+	int arg;
+} opcode_table[ATOM_OP_CNT] = {
+	{ NULL, 0 },
+	{ atom_op_move, ATOM_ARG_REG },
+	{ atom_op_move, ATOM_ARG_PS },
+	{ atom_op_move, ATOM_ARG_WS },
+	{ atom_op_move, ATOM_ARG_FB },
+	{ atom_op_move, ATOM_ARG_PLL },
+	{ atom_op_move, ATOM_ARG_MC },
+	{ atom_op_and, ATOM_ARG_REG },
+	{ atom_op_and, ATOM_ARG_PS },
+	{ atom_op_and, ATOM_ARG_WS },
+	{ atom_op_and, ATOM_ARG_FB },
+	{ atom_op_and, ATOM_ARG_PLL },
+	{ atom_op_and, ATOM_ARG_MC },
+	{ atom_op_or, ATOM_ARG_REG },
+	{ atom_op_or, ATOM_ARG_PS },
+	{ atom_op_or, ATOM_ARG_WS },
+	{ atom_op_or, ATOM_ARG_FB },
+	{ atom_op_or, ATOM_ARG_PLL },
+	{ atom_op_or, ATOM_ARG_MC },
+	{ atom_op_shift_left, ATOM_ARG_REG },
+	{ atom_op_shift_left, ATOM_ARG_PS },
+	{ atom_op_shift_left, ATOM_ARG_WS },
+	{ atom_op_shift_left, ATOM_ARG_FB },
+	{ atom_op_shift_left, ATOM_ARG_PLL },
+	{ atom_op_shift_left, ATOM_ARG_MC },
+	{ atom_op_shift_right, ATOM_ARG_REG },
+	{ atom_op_shift_right, ATOM_ARG_PS },
+	{ atom_op_shift_right, ATOM_ARG_WS },
+	{ atom_op_shift_right, ATOM_ARG_FB },
+	{ atom_op_shift_right, ATOM_ARG_PLL },
+	{ atom_op_shift_right, ATOM_ARG_MC },
+	{ atom_op_mul, ATOM_ARG_REG },
+	{ atom_op_mul, ATOM_ARG_PS },
+	{ atom_op_mul, ATOM_ARG_WS },
+	{ atom_op_mul, ATOM_ARG_FB },
+	{ atom_op_mul, ATOM_ARG_PLL },
+	{ atom_op_mul, ATOM_ARG_MC },
+	{ atom_op_div, ATOM_ARG_REG },
+	{ atom_op_div, ATOM_ARG_PS },
+	{ atom_op_div, ATOM_ARG_WS },
+	{ atom_op_div, ATOM_ARG_FB },
+	{ atom_op_div, ATOM_ARG_PLL },
+	{ atom_op_div, ATOM_ARG_MC },
+	{ atom_op_add, ATOM_ARG_REG },
+	{ atom_op_add, ATOM_ARG_PS },
+	{ atom_op_add, ATOM_ARG_WS },
+	{ atom_op_add, ATOM_ARG_FB },
+	{ atom_op_add, ATOM_ARG_PLL },
+	{ atom_op_add, ATOM_ARG_MC },
+	{ atom_op_sub, ATOM_ARG_REG },
+	{ atom_op_sub, ATOM_ARG_PS },
+	{ atom_op_sub, ATOM_ARG_WS },
+	{ atom_op_sub, ATOM_ARG_FB },
+	{ atom_op_sub, ATOM_ARG_PLL },
+	{ atom_op_sub, ATOM_ARG_MC },
+	{ atom_op_setport, ATOM_PORT_ATI },
+	{ atom_op_setport, ATOM_PORT_PCI },
+	{ atom_op_setport, ATOM_PORT_SYSIO },
+	{ atom_op_setregblock, 0 },
+	{ atom_op_setfbbase, 0 },
+	{ atom_op_compare, ATOM_ARG_REG },
+	{ atom_op_compare, ATOM_ARG_PS },
+	{ atom_op_compare, ATOM_ARG_WS },
+	{ atom_op_compare, ATOM_ARG_FB },
+	{ atom_op_compare, ATOM_ARG_PLL },
+	{ atom_op_compare, ATOM_ARG_MC },
+	{ atom_op_switch, 0 },
+	{ atom_op_jump, ATOM_COND_ALWAYS },
+	{ atom_op_jump, ATOM_COND_EQUAL },
+	{ atom_op_jump, ATOM_COND_BELOW },
+	{ atom_op_jump, ATOM_COND_ABOVE },
+	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
+	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
+	{ atom_op_jump, ATOM_COND_NOTEQUAL },
+	{ atom_op_test, ATOM_ARG_REG },
+	{ atom_op_test, ATOM_ARG_PS },
+	{ atom_op_test, ATOM_ARG_WS },
+	{ atom_op_test, ATOM_ARG_FB },
+	{ atom_op_test, ATOM_ARG_PLL },
+	{ atom_op_test, ATOM_ARG_MC },
+	{ atom_op_delay, ATOM_UNIT_MILLISEC },
+	{ atom_op_delay, ATOM_UNIT_MICROSEC },
+	{ atom_op_calltable, 0 },
+	{ atom_op_repeat, 0 },
+	{ atom_op_clear, ATOM_ARG_REG },
+	{ atom_op_clear, ATOM_ARG_PS },
+	{ atom_op_clear, ATOM_ARG_WS },
+	{ atom_op_clear, ATOM_ARG_FB },
+	{ atom_op_clear, ATOM_ARG_PLL },
+	{ atom_op_clear, ATOM_ARG_MC },
+	{ atom_op_nop, 0 },
+	{ atom_op_eot, 0 },
+	{ atom_op_mask, ATOM_ARG_REG },
+	{ atom_op_mask, ATOM_ARG_PS },
+	{ atom_op_mask, ATOM_ARG_WS },
+	{ atom_op_mask, ATOM_ARG_FB },
+	{ atom_op_mask, ATOM_ARG_PLL },
+	{ atom_op_mask, ATOM_ARG_MC },
+	{ atom_op_postcard, 0 },
+	{ atom_op_beep, 0 },
+	{ atom_op_savereg, 0 },
+	{ atom_op_restorereg, 0 },
+	{ atom_op_setdatablock, 0 },
+	{ atom_op_xor, ATOM_ARG_REG },
+	{ atom_op_xor, ATOM_ARG_PS },
+	{ atom_op_xor, ATOM_ARG_WS },
+	{ atom_op_xor, ATOM_ARG_FB },
+	{ atom_op_xor, ATOM_ARG_PLL },
+	{ atom_op_xor, ATOM_ARG_MC },
+	{ atom_op_shl, ATOM_ARG_REG },
+	{ atom_op_shl, ATOM_ARG_PS },
+	{ atom_op_shl, ATOM_ARG_WS },
+	{ atom_op_shl, ATOM_ARG_FB },
+	{ atom_op_shl, ATOM_ARG_PLL },
+	{ atom_op_shl, ATOM_ARG_MC },
+	{ atom_op_shr, ATOM_ARG_REG },
+	{ atom_op_shr, ATOM_ARG_PS },
+	{ atom_op_shr, ATOM_ARG_WS },
+	{ atom_op_shr, ATOM_ARG_FB },
+	{ atom_op_shr, ATOM_ARG_PLL },
+	{ atom_op_shr, ATOM_ARG_MC },
+	{ atom_op_debug, 0 },
+};
+
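+/*
+ * Execute command table 'index'.  The master command table at
+ * ctx->cmd_table is a 4-byte header followed by 16-bit offsets, one per
+ * table; each command table then starts with its own header: total size
+ * (16 bits), workspace size (allocated as 32-bit words) and parameter-space
+ * size in bytes, with the bytecode at ATOM_CT_CODE_PTR.
+ */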
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int base = CU16(ctx->cmd_table + 4 + 2 * index);
+	int len, ws, ps, ptr;
+	unsigned char op;
+	atom_exec_context ectx;
+	int ret = 0;
+
+	if (!base)
+		return -EINVAL;
+
+	len = CU16(base + ATOM_CT_SIZE_PTR);
+	ws = CU8(base + ATOM_CT_WS_PTR);
+	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+	ptr = base + ATOM_CT_CODE_PTR;
+
+	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+	ectx.ctx = ctx;
+	ectx.ps_shift = ps / 4;
+	ectx.start = base;
+	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
+	if (ws)
+		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+	else
+		ectx.ws = NULL;
+
+	debug_depth++;
+	while (1) {
+		op = CU8(ptr++);
+		if (op < ATOM_OP_NAMES_CNT)
+			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+		else
+			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				base, len, ws, ps, ptr - 1);
+			ret = -EINVAL;
+			goto free;
+		}
+
+		if (op < ATOM_OP_CNT && op > 0)
+			opcode_table[op].func(&ectx, &ptr,
+					      opcode_table[op].arg);
+		else
+			break;
+
+		if (op == ATOM_OP_EOT)
+			break;
+	}
+	debug_depth--;
+	SDEBUG("<<\n");
+
+free:
+	if (ws)
+		kfree(ectx.ws);
+	return ret;
+}
+
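+/*
+ * Two locks guard execution: ctx->scratch_mutex serialises users of the
+ * shared FB scratch area (taken by atom_execute_table()), while ctx->mutex
+ * serialises the interpreter state itself.  This variant is for callers
+ * that already hold scratch_mutex.
+ */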
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int r;
+
+	mutex_lock(&ctx->mutex);
+	/* reset data block */
+	ctx->data_block = 0;
+	/* reset reg block */
+	ctx->reg_block = 0;
+	/* reset fb window */
+	ctx->fb_base = 0;
+	/* reset io mode */
+	ctx->io_mode = ATOM_IO_MM;
+	/* reset divmul */
+	ctx->divmul[0] = 0;
+	ctx->divmul[1] = 0;
+	r = atom_execute_table_locked(ctx, index, params);
+	mutex_unlock(&ctx->mutex);
+	return r;
+}
+
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int r;
+	mutex_lock(&ctx->scratch_mutex);
+	r = atom_execute_table_scratch_unlocked(ctx, index, params);
+	mutex_unlock(&ctx->scratch_mutex);
+	return r;
+}
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
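+/*
+ * Build the port-id -> opcode-stream map used by atom_iio_execute().  Each
+ * ATOM_IIO_START block is keyed by its one-byte port id; atom_iio_len[]
+ * above gives the encoded length of every IIO opcode so a stream can be
+ * skipped to find the next block.
+ */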
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+	if (!ctx->iio)
+		return;
+	while (CU8(base) == ATOM_IIO_START) {
+		ctx->iio[CU8(base + 1)] = base + 2;
+		base += 2;
+		while (CU8(base) != ATOM_IIO_END)
+			base += atom_iio_len[CU8(base)];
+		base += 3;
+	}
+}
+
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+	int base;
+	struct atom_context *ctx =
+	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+	char *str;
+	char name[512];
+	int i;
+
+	if (!ctx)
+		return NULL;
+
+	ctx->card = card;
+	ctx->bios = bios;
+
+	if (CU16(0) != ATOM_BIOS_MAGIC) {
+		pr_info("Invalid BIOS magic\n");
+		kfree(ctx);
+		return NULL;
+	}
+	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+		    strlen(ATOM_ATI_MAGIC))) {
+		pr_info("Invalid ATI magic\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	base = CU16(ATOM_ROM_TABLE_PTR);
+	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+		    strlen(ATOM_ROM_MAGIC))) {
+		pr_info("Invalid ATOM magic\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+	if (!ctx->iio) {
+		atom_destroy(ctx);
+		return NULL;
+	}
+
+	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+	while (*str && ((*str == '\n') || (*str == '\r')))
+		str++;
+	/* name string isn't always 0 terminated */
+	for (i = 0; i < 511; i++) {
+		name[i] = str[i];
+		if (name[i] < '.' || name[i] > 'z') {
+			name[i] = 0;
+			break;
+		}
+	}
+	pr_info("ATOM BIOS: %s\n", name);
+
+	return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+	uint32_t ps[16];
+	int ret;
+
+	memset(ps, 0, sizeof(ps));
+
+	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+	if (!ps[0] || !ps[1])
+		return 1;
+
+	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+		return 1;
+	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, sizeof(ps));
+
+	if (rdev->family < CHIP_R600) {
+		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+	}
+	return ret;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+	kfree(ctx->iio);
+	kfree(ctx);
+}
+
+bool atom_parse_data_header(struct atom_context *ctx, int index,
+			    uint16_t *size, uint8_t *frev, uint8_t *crev,
+			    uint16_t *data_start)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
+
+	if (size)
+		*size = CU16(idx);
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	*data_start = idx;
+	return true;
+}
+
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+			   uint8_t *crev)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
+
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	return true;
+}
+
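+/*
+ * Reserve the scratch buffer the BIOS uses as its "frame buffer window"
+ * (ATOM_ARG_FB accesses relative to ATOM_WS_FB_WINDOW).  The size comes
+ * from the VRAM_UsageByFirmware data table, falling back to 20 KiB when
+ * the table is absent.
+ */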
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes = 0;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+	}
+	ctx->scratch_size_bytes = 0;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	ctx->scratch_size_bytes = usage_bytes;
+	return 0;
+}
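
The pointer chase at the top of atom_parse() reduces to a handful of little-endian reads. A minimal user-space sketch of the same validation, using the offsets defined in atom.h below (rd16() and parse_rom() are ad-hoc names, and a real caller would also bounds-check every offset against the image size):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Little-endian helper matching get_u16() in atom-bits.h. */
static uint16_t rd16(const uint8_t *b, int p)
{
	return b[p] | (b[p + 1] << 8);
}

/* Mirror the checks done by atom_parse(): the 0xAA55 PC BIOS magic, the
 * " 761295520" ATI magic at offset 0x30 and the "ATOM" magic inside the
 * ROM header, then pull out the master table offsets. */
int parse_rom(const uint8_t *bios)
{
	uint16_t base;

	if (rd16(bios, 0) != 0xAA55)
		return -1;
	if (memcmp(bios + 0x30, " 761295520", 10))
		return -1;
	base = rd16(bios, 0x48);			/* ATOM_ROM_TABLE_PTR */
	if (memcmp(bios + base + 4, "ATOM", 4))		/* ATOM_ROM_MAGIC_PTR */
		return -1;
	printf("cmd tables @ 0x%04X, data tables @ 0x%04X\n",
	       (unsigned)rd16(bios, base + 0x1E),	/* ATOM_ROM_CMD_PTR */
	       (unsigned)rd16(bios, base + 0x20));	/* ATOM_ROM_DATA_PTR */
	return 0;
}
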
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 0000000..6d014dd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#define ATOM_BIOS_MAGIC		0xAA55
+#define ATOM_ATI_MAGIC_PTR	0x30
+#define ATOM_ATI_MAGIC		" 761295520"
+#define ATOM_ROM_TABLE_PTR	0x48
+
+#define ATOM_ROM_MAGIC		"ATOM"
+#define ATOM_ROM_MAGIC_PTR	4
+
+#define ATOM_ROM_MSG_PTR	0x10
+#define ATOM_ROM_CMD_PTR	0x1E
+#define ATOM_ROM_DATA_PTR	0x20
+
+#define ATOM_CMD_INIT		0
+#define ATOM_CMD_SETSCLK	0x0A
+#define ATOM_CMD_SETMCLK	0x0B
+#define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
+
+#define ATOM_DATA_FWI_PTR	0xC
+#define ATOM_DATA_IIO_PTR	0x32
+
+#define ATOM_FWI_DEFSCLK_PTR	8
+#define ATOM_FWI_DEFMCLK_PTR	0xC
+#define ATOM_FWI_MAXSCLK_PTR	0x24
+#define ATOM_FWI_MAXMCLK_PTR	0x28
+
+#define ATOM_CT_SIZE_PTR	0
+#define ATOM_CT_WS_PTR		4
+#define ATOM_CT_PS_PTR		5
+#define ATOM_CT_PS_MASK		0x7F
+#define ATOM_CT_CODE_PTR	6
+
+#define ATOM_OP_CNT		123
+#define ATOM_OP_EOT		91
+
+#define ATOM_CASE_MAGIC		0x63
+#define ATOM_CASE_END		0x5A5A
+
+#define ATOM_ARG_REG		0
+#define ATOM_ARG_PS		1
+#define ATOM_ARG_WS		2
+#define ATOM_ARG_FB		3
+#define ATOM_ARG_ID		4
+#define ATOM_ARG_IMM		5
+#define ATOM_ARG_PLL		6
+#define ATOM_ARG_MC		7
+
+#define ATOM_SRC_DWORD		0
+#define ATOM_SRC_WORD0		1
+#define ATOM_SRC_WORD8		2
+#define ATOM_SRC_WORD16		3
+#define ATOM_SRC_BYTE0		4
+#define ATOM_SRC_BYTE8		5
+#define ATOM_SRC_BYTE16		6
+#define ATOM_SRC_BYTE24		7
+
+#define ATOM_WS_QUOTIENT	0x40
+#define ATOM_WS_REMAINDER	0x41
+#define ATOM_WS_DATAPTR		0x42
+#define ATOM_WS_SHIFT		0x43
+#define ATOM_WS_OR_MASK		0x44
+#define ATOM_WS_AND_MASK	0x45
+#define ATOM_WS_FB_WINDOW	0x46
+#define ATOM_WS_ATTRIBUTES	0x47
+#define ATOM_WS_REGPTR  	0x48
+
+#define ATOM_IIO_NOP		0
+#define ATOM_IIO_START		1
+#define ATOM_IIO_READ		2
+#define ATOM_IIO_WRITE		3
+#define ATOM_IIO_CLEAR		4
+#define ATOM_IIO_SET		5
+#define ATOM_IIO_MOVE_INDEX	6
+#define ATOM_IIO_MOVE_ATTR	7
+#define ATOM_IIO_MOVE_DATA	8
+#define ATOM_IIO_END		9
+
+#define ATOM_IO_MM		0
+#define ATOM_IO_PCI		1
+#define ATOM_IO_SYSIO		2
+#define ATOM_IO_IIO		0x80
+
+struct card_info {
+	struct drm_device *dev;
+	void (*reg_write)(struct card_info *, uint32_t, uint32_t);	/* filled by driver */
+	uint32_t (*reg_read)(struct card_info *, uint32_t);		/* filled by driver */
+	void (*ioreg_write)(struct card_info *, uint32_t, uint32_t);	/* filled by driver */
+	uint32_t (*ioreg_read)(struct card_info *, uint32_t);		/* filled by driver */
+	void (*mc_write)(struct card_info *, uint32_t, uint32_t);	/* filled by driver */
+	uint32_t (*mc_read)(struct card_info *, uint32_t);		/* filled by driver */
+	void (*pll_write)(struct card_info *, uint32_t, uint32_t);	/* filled by driver */
+	uint32_t (*pll_read)(struct card_info *, uint32_t);		/* filled by driver */
+};
+
+struct atom_context {
+	struct card_info *card;
+	struct mutex mutex;
+	struct mutex scratch_mutex;
+	void *bios;
+	uint32_t cmd_table, data_table;
+	uint16_t *iio;
+
+	uint16_t data_block;
+	uint32_t fb_base;
+	uint32_t divmul[2];
+	uint16_t io_attr;
+	uint16_t reg_block;
+	uint8_t shift;
+	int cs_equal, cs_above;
+	int io_mode;
+	uint32_t *scratch;
+	int scratch_size_bytes;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+			    uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+			   uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
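
To tie the pieces together: a driver fills struct card_info with its register accessors before handing the BIOS image to atom_parse(). The sketch below shows the shape of that wiring under stated assumptions -- every demo_* name is a placeholder, the IO/MC/PLL hooks are stubbed with the plain MMIO accessors for brevity, and atom_asic_init() additionally expects dev->dev_private to be a radeon_device (it reads the chip family) -- so this is illustrative, not the actual radeon_atombios.c code:

#include <linux/io.h>
#include "atom.h"

static void __iomem *demo_mmio;	/* placeholder for the driver's MMIO mapping */

static uint32_t demo_reg_read(struct card_info *info, uint32_t reg)
{
	return readl(demo_mmio + reg * 4);	/* ATOM register indices count dwords */
}

static void demo_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	writel(val, demo_mmio + reg * 4);
}

static int demo_atom_init(struct drm_device *dev, void *bios_image)
{
	static struct card_info info;
	struct atom_context *ctx;

	info.dev = dev;
	info.reg_read = demo_reg_read;
	info.reg_write = demo_reg_write;
	info.ioreg_read = demo_reg_read;	/* stubs: a real driver installs */
	info.ioreg_write = demo_reg_write;	/* dedicated IO-port, MC and PLL */
	info.mc_read = demo_reg_read;		/* accessors here */
	info.mc_write = demo_reg_write;
	info.pll_read = demo_reg_read;
	info.pll_write = demo_reg_write;

	ctx = atom_parse(&info, bios_image);
	if (!ctx)
		return -EINVAL;
	mutex_init(&ctx->mutex);
	mutex_init(&ctx->scratch_mutex);
	if (atom_allocate_fb_scratch(ctx))
		return -ENOMEM;
	return atom_asic_init(ctx);	/* runs the ASIC_Init table (index 0) */
}
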
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 0000000..4b86e8b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,7981 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.  
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/****************************************************************************/
+/* Portion I: Definitions shared between VBIOS and Driver                   */
+/****************************************************************************/
+
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR                   0x00020000
+#define ATOM_VERSION_MINOR                   0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified before this header is included;
+ * atom-types.h derives ATOM_BIG_ENDIAN from the kernel configuration.
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+  #ifndef ULONG 
+    typedef unsigned long ULONG;
+  #endif
+
+  #ifndef UCHAR
+    typedef unsigned char UCHAR;
+  #endif
+
+  #ifndef USHORT 
+    typedef unsigned short USHORT;
+  #endif
+#endif
+      
+#define ATOM_DAC_A            0 
+#define ATOM_DAC_B            1
+#define ATOM_EXT_DAC          2
+
+#define ATOM_CRTC1            0
+#define ATOM_CRTC2            1
+#define ATOM_CRTC3            2
+#define ATOM_CRTC4            3
+#define ATOM_CRTC5            4
+#define ATOM_CRTC6            5
+#define ATOM_CRTC_INVALID     0xFF
+
+#define ATOM_DIGA             0
+#define ATOM_DIGB             1
+
+#define ATOM_PPLL1            0
+#define ATOM_PPLL2            1
+#define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_PPLL3            3
+
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
+#define ATOM_PPLL_INVALID     0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL       0       
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
+#define ATOM_SCALER1          0
+#define ATOM_SCALER2          1
+
+#define ATOM_SCALER_DISABLE   0   
+#define ATOM_SCALER_CENTER    1   
+#define ATOM_SCALER_EXPANSION 2   
+#define ATOM_SCALER_MULTI_EX  3   
+
+#define ATOM_DISABLE          0
+#define ATOM_ENABLE           1
+#define ATOM_LCD_BLOFF                          (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON                           (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL          (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START                 (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP                  (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT                       (ATOM_DISABLE+7)
+#define ATOM_INIT                               (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS                         (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING         1
+#define ATOM_BLANKING_OFF     0
+
+#define ATOM_CURSOR1          0
+#define ATOM_CURSOR2          1
+
+#define ATOM_ICON1            0
+#define ATOM_ICON2            1
+
+#define ATOM_CRT1             0
+#define ATOM_CRT2             1
+
+#define ATOM_TV_NTSC          1
+#define ATOM_TV_NTSCJ         2
+#define ATOM_TV_PAL           3
+#define ATOM_TV_PALM          4
+#define ATOM_TV_PALCN         5
+#define ATOM_TV_PALN          6
+#define ATOM_TV_PAL60         7
+#define ATOM_TV_SECAM         8
+#define ATOM_TV_CV            16
+
+#define ATOM_DAC1_PS2         1
+#define ATOM_DAC1_CV          2
+#define ATOM_DAC1_NTSC        3
+#define ATOM_DAC1_PAL         4
+
+#define ATOM_DAC2_PS2         ATOM_DAC1_PS2
+#define ATOM_DAC2_CV          ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC        ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL         ATOM_DAC1_PAL
+ 
+#define ATOM_PM_ON            0
+#define ATOM_PM_STANDBY       1
+#define ATOM_PM_SUSPEND       2
+#define ATOM_PM_OFF           3
+
+/* Bit0:{=0:single, =1:dual},
+   Bit1:{=0:666RGB, =1:888RGB},
+   Bit2:3:{Grey level}
+   Bit4:{=0:LDI format for RGB888, =1:FPDI format for RGB888} */
+
+#define ATOM_PANEL_MISC_DUAL               0x00000001
+#define ATOM_PANEL_MISC_888RGB             0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL         0x0000000C
+#define ATOM_PANEL_MISC_FPDI               0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT   2
+#define ATOM_PANEL_MISC_SPATIAL            0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL           0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED        0x00000080
+
+
+#define MEMTYPE_DDR1              "DDR1"
+#define MEMTYPE_DDR2              "DDR2"
+#define MEMTYPE_DDR3              "DDR3"
+#define MEMTYPE_DDR4              "DDR4"
+
+#define ASIC_BUS_TYPE_PCI         "PCI"
+#define ASIC_BUS_TYPE_AGP         "AGP"
+#define ASIC_BUS_TYPE_PCIE        "PCI_EXPRESS"
+
+/* Maximum size of the FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING     "FGL"             //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING  3        //sizeof( ATOM_FIREGL_FLAG_STRING )
+
+#define ATOM_FAKE_DESKTOP_STRING    "DSK"             //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING  ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 
+
+#define ATOM_M54T_FLAG_STRING       "M54T"            //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING    4        //sizeof( ATOM_M54T_FLAG_STRING )
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE          2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS          1
+
+#pragma pack(1)                                       /* BIOS data must use byte alignment */
+
+/*  Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER		0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE				    0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE    0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE   20    /* including the terminator 0x0! */
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER		0x002f
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_START		0x006e
+
+/* Common header for all ROM data tables.
+   Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header,
+   and the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+  USHORT usStructureSize;
+  UCHAR  ucTableFormatRevision;   /*Change it when the Parser is not backward compatible */
+  UCHAR  ucTableContentRevision;  /*Change it only when the table needs to change but the firmware */
+                                  /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structure stores the ROM header.
+/****************************************************************************/	
+typedef struct _ATOM_ROM_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  UCHAR	 uaFirmWareSignature[4];    /*Signature to distinguish between Atombios and non-atombios, 
+                                      atombios should init it as "ATOM", don't change the position */
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset; 
+  USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+  USHORT usMasterDataTableOffset;   /*Offset for SW to get all data table offsets, Don't change the position */
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+}ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef	UEFI_BUILD
+	#define	UTEMP	USHORT
+	#define	USHORT	void*
+#endif
+
+/****************************************************************************/	
+// Structures used in Command.mtb 
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+  USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
+  USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
+  USHORT ASIC_RegistersInit;                     //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT VRAM_BlockVenderDetection;              //Atomic Table,  used only by Bios
+  USHORT DIGxEncoderControl;										 //Only used by Bios
+  USHORT MemoryControllerInit;                   //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT EnableCRTCMemReq;                       //Function Table,directly used by various SW components,latest version 2.1
+  USHORT MemoryParamAdjust; 										 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock if needed
+  USHORT DVOEncoderControl;                      //Function Table,directly used by various SW components,latest version 1.2
+  USHORT GPIOPinControl;												 //Atomic Table,  only used by Bios
+  USHORT SetEngineClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetMemoryClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetPixelClock;                          //Function Table,directly used by various SW components,latest version 1.2  
+  USHORT EnableDispPowerGating;                  //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT ResetMemoryDLL;                         //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ResetMemoryDevice;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryPLLInit;                          //Atomic Table,  used only by Bios
+  USHORT AdjustDisplayPll;                       //Atomic Table,  used by various SW components.
+  USHORT AdjustMemoryController;                 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock                
+  USHORT EnableASIC_StaticPwrMgt;                //Atomic Table,  only used by Bios
+  USHORT SetUniphyInstance;                      //Atomic Table,  only used by Bios   
+  USHORT DAC_LoadDetection;                      //Atomic Table,  directly used by various SW components,latest version 1.2  
+  USHORT LVTMAEncoderControl;                    //Atomic Table,directly used by various SW components,latest version 1.3
+  USHORT HW_Misc_Operation;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DAC1EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1  
+  USHORT DAC2EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DVOOutputControl;                       //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT CV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead 
+  USHORT GetConditionalGoldenSetting;            //Only used by Bios
+  USHORT TVEncoderControl;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT PatchMCSetting;                         //only used by BIOS
+  USHORT MC_SEQ_Control;                         //only used by BIOS
+  USHORT Gfx_Harvesting;                         //Atomic Table,  Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
+  USHORT EnableScaler;                           //Atomic Table,  used only by Bios
+  USHORT BlankCRTC;                              //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableCRTC;                             //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetPixelClock;                          //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableVGA_Render;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT GetSCLKOverMCLKRatio;                   //Atomic Table,  only used by Bios
+  USHORT SetCRTC_Timing;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetCRTC_OverScan;                       //Atomic Table,  used by various SW components,latest version 1.1 
+  USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
+  USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
+  USHORT UpdateCRTC_DoubleBufferRegisters;			 //Atomic Table,  used only by Bios
+  USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
+  USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
+  USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetEngineClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT SetCRTC_UsingDTDTiming;                 //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ExternalEncoderControl;                 //Atomic Table,  directly used by various SW components,latest version 2.1
+  USHORT LVTMAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT VRAM_BlockDetectionByStrap;             //Atomic Table,  used only by Bios
+  USHORT MemoryCleanUp;                          //Atomic Table,  only used by Bios    
+  USHORT ProcessI2cChannelTransaction;           //Function Table,only used by Bios
+  USHORT WriteOneByteToHWAssistedI2C;            //Function Table,indirectly used by various SW components 
+  USHORT ReadHWAssistedI2CStatus;                //Atomic Table,  indirectly used by various SW components
+  USHORT SpeedFanControl;                        //Function Table,indirectly used by various SW components,called from ASIC_Init
+  USHORT PowerConnectorDetection;                //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT MC_Synchronization;                     //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ComputeMemoryEnginePLL;                 //Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock
+  USHORT MemoryRefreshConversion;                //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
+  USHORT VRAM_GetCurrentInfoBlock;               //Atomic Table,  used only by Bios
+  USHORT DynamicMemorySettings;                  //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryTraining;                         //Atomic Table,  used only by Bios
+  USHORT EnableSpreadSpectrumOnPPLL;             //Atomic Table,  directly used by various SW components,latest version 1.2
+  USHORT TMDSAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetVoltage;                             //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+  USHORT DAC1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DAC2OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ComputeMemoryClockParam;                //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ClockSource;                            //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT MemoryDeviceInit;                       //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT GetDispObjectInfo;                      //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
+  USHORT DIG1EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG1TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2TransmitterControl;	               //Atomic Table,directly used by various SW components,latest version 1.1 
+  USHORT ProcessAuxChannelTransaction;					 //Function Table,only used by Bios
+  USHORT DPEncoderService;											 //Function Table,only used by Bios
+  USHORT GetVoltageInfo;                         //Function Table,only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;   
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
+#define DPTranslatorControl                      DIG2EncoderControl
+#define UNIPHYTransmitterControl			     DIG1TransmitterControl
+#define LVTMATransmitterControl				     DIG2TransmitterControl
+#define SetCRTC_DPM_State                        GetConditionalGoldenSetting
+#define ASIC_StaticPwrMgtStatusChange            SetUniphyInstance 
+#define HPDInterruptService                      ReadHWAssistedI2CStatus
+#define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define EnableYUV                                GetDispObjectInfo                         
+#define DynamicClockGating                       EnableDispPowerGating
+#define SetupHWAssistedI2CStatus                 ComputeMemoryClockParam
+
+#define TMDSAEncoderControl                      PatchMCSetting
+#define LVDSEncoderControl                       MC_SEQ_Control
+#define LCD1OutputControl                        HW_Misc_Operation
+#define TV1OutputControl                         Gfx_Harvesting
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER           sHeader;
+  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
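+
+/* Illustrative sketch (not part of the original header): each member of
+ * ATOM_MASTER_LIST_OF_COMMAND_TABLES is a USHORT offset from the ROM image
+ * base, so a command table can also be fetched generically by index.
+ * 'bios' and the helper name are hypothetical; assumes little-endian access.
+ */
+#if 0	/* example only, never compiled */
+static USHORT atom_cmd_table_offset(UCHAR *bios, ATOM_ROM_HEADER *hdr, int idx)
+{
+	ATOM_MASTER_COMMAND_TABLE *mct = (ATOM_MASTER_COMMAND_TABLE *)
+		(bios + hdr->usMasterCommandTableOffset);
+	USHORT *list = (USHORT *)&mct->ListOfCommandTables;
+
+	return list[idx];	/* 0 means the table is not present */
+}
+#endif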
+
+/****************************************************************************/	
+// Structures used in every command table
+/****************************************************************************/	
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+#else
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+  ATOM_TABLE_ATTRIBUTE sbfAccess;
+  USHORT               susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
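+
+/* Illustrative sketch (not part of the original header): the union above
+ * lets the attribute word in front of every command table be read whole or
+ * decoded field by field, e.g. to size the workspace before execution.
+ */
+#if 0	/* example only, never compiled */
+static USHORT atom_ws_size(USHORT attr)
+{
+	ATOM_TABLE_ATTRIBUTE_ACCESS a;
+
+	a.susAccess = attr;			/* raw 16-bit attribute word */
+	return a.sbfAccess.WS_SizeInBytes;	/* workspace size in bytes */
+}
+#endif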
+
+/****************************************************************************/	
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. 
+// And the pointer actually points to this header.
+/****************************************************************************/	
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER CommonHeader;
+  ATOM_TABLE_ATTRIBUTE     TableAttribute;	
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/	
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/	
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/	
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+  ULONG   ulClock;        //On return, this is the re-calculated clock based on the given Fb_div, Post_Div and ref_div
+  UCHAR   ucAction;       //0:reserved //1:Memory //2:Engine  
+  UCHAR   ucReserved;     //may expand to return larger Fbdiv later
+  UCHAR   ucFbDiv;        //return value
+  UCHAR   ucPostDiv;      //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+  ULONG   ulClock;        //On return, [23:0] holds the real clock 
+  UCHAR   ucAction;       //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to the register
+  USHORT  usFbDiv;		    //return Feedback value to be written to register
+  UCHAR   ucPostDiv;      //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+
+#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF  //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK                  0x01000000  //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000	//Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000  //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK									0x08000000	//Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL											0x10000000	//Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK  USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                  0x01       //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH                 0x02	     //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04       //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK									0x08       //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL											0x10			 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
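+
+/* Illustrative sketch (not part of the original header): a clock-change
+ * request packs the target clock (10 kHz units) into bits [23:0] and the
+ * control flags above into bits [31:24]. The helper and its values are
+ * hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static ULONG atom_memclk_request(ULONG clk_10khz, int use_self_refresh)
+{
+	ULONG req = clk_10khz & SET_CLOCK_FREQ_MASK;	/* e.g. 90000 = 900 MHz */
+
+	if (use_self_refresh)
+		req |= USE_MEMORY_SELF_REFRESH_MASK;
+	return req;
+}
+#endif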
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+#else
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+  USHORT usFbDivFrac;  
+  USHORT usFbDiv;  
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  UCHAR   ucCntlFlag;                         //Output Parameter      
+  UCHAR   ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9						8
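+
+/* Illustrative sketch (not part of the original header): the anonymous union
+ * in COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 is written as
+ * ATOM_COMPUTE_CLOCK_FREQ on input and read back as ATOM_S_MPLL_FB_DIVIDER
+ * once the table has run. A hypothetical caller might prepare an
+ * engine-clock request like this.
+ */
+#if 0	/* example only, never compiled */
+static void atom_prepare_pll_v3(COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 *p,
+				ULONG clk_10khz)
+{
+	p->ulClock.ulClockFreq = clk_10khz;
+	p->ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;
+	/* after execution: p->ulFbDiv, p->ucRefDiv and p->ucPostDiv hold the
+	 * computed dividers */
+}
+#endif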
+
+
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucPostDiv:8;        //return parameter: post divider which is used to program to register directly
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+#else
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+  ULONG  ucPostDiv:8;        //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;                       
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
+{
+  ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+  ULONG   ulReserved[2];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK            0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK           0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK                     0x01
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4  ulClock;         //Output Parameter: ucPostDiv=DFS divider
+  ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter: PLL FB divider
+  UCHAR   ucPllRefDiv;                      //Output Parameter: PLL ref divider      
+  UCHAR   ucPllPostDiv;                     //Output Parameter: PLL post divider      
+  UCHAR   ucPllCntlFlag;                    //Output Flags: control flag
+  UCHAR   ucReserved;                       
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
+
+//ucPllCntlFlag
+#define SPLL_CNTL_FLAG_VCO_MODE_MASK            0x03 
+
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
+// used by ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG  ulClock;         
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output:UPPER_WORD=FB_DIV_INTEGER,  LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR   ucDllSpeed;                         //Output 
+  UCHAR   ucPostDiv;                          //Output
+  union{
+    UCHAR   ucInputFlag;                      //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR   ucPllCntlFlag;                    //Output: 
+  };
+  UCHAR   ucBWCntl;                       
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03 
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulMemoryClock;
+  ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/	
+// Structures used by SetEngineClockTable
+/****************************************************************************/	
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by SetMemoryClockTable
+/****************************************************************************/	
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/	
+typedef struct _ASIC_INIT_PARAMETERS
+{
+  ULONG ulDefaultEngineClock;         //In 10Khz unit
+  ULONG ulDefaultMemoryClock;         //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+  ASIC_INIT_PARAMETERS sASICInitClocks;
+  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
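+
+/* Illustrative sketch (not part of the original header): ASIC_Init only
+ * needs the two bootup clocks filled in; sReserved is scratch space for the
+ * nested SetEngineClock call. Values shown are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void atom_fill_asic_init(ASIC_INIT_PS_ALLOCATION *ps)
+{
+	ps->sASICInitClocks.ulDefaultEngineClock = 60000;	/* 600 MHz */
+	ps->sASICInitClocks.ulDefaultMemoryClock = 80000;	/* 800 MHz */
+	/* ps->sReserved is initialized by the table itself */
+}
+#endif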
+
+/****************************************************************************/	
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/	
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS 
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
+/****************************************************************************/	
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+  USHORT usDeviceID;                  //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+  UCHAR  ucDacType;                   //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+  UCHAR  ucMisc;											//Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb						0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+  DAC_LOAD_DETECTION_PARAMETERS            sDacload;
+  ULONG                                    Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS 
+{
+  USHORT usPixelClock;                // in 10 kHz; for BIOS convenience
+  UCHAR  ucDacStandard;               // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+                                      // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DIG1EncoderControlTable
+//                    DIG2EncoderControlTable
+//                    ExternalEncoderControlTable
+/****************************************************************************/	
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;		// in 10 kHz; for BIOS convenience
+  UCHAR  ucConfig;		  
+                            // [2] Link Select:
+                            // =0: PHY linkA if bfLanes<3
+                            // =1: PHY linkB if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [3] Transmitter Sel
+                            // =0: UNIPHY or PCIEPHY
+                            // =1: LVTMA 					
+  UCHAR ucAction;           // =0: turn off encoder					
+                            // =1: turn on encoder			
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION			  DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER			DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ		0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B							  ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A							  ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK	0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY							  0x00
+#define ATOM_ENCODER_CONFIG_LVTMA								  0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1				  0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2				  0x08
+#define ATOM_ENCODER_CONFIG_DIGB								  0x80			// VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE:  Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP											0
+#define ATOM_ENCODER_MODE_LVDS										1
+#define ATOM_ENCODER_MODE_DVI											2
+#define ATOM_ENCODER_MODE_HDMI										3
+#define ATOM_ENCODER_MODE_SDVO										4
+#define ATOM_ENCODER_MODE_DP_AUDIO                5
+#define ATOM_ENCODER_MODE_TV											13
+#define ATOM_ENCODER_MODE_CV											14
+#define ATOM_ENCODER_MODE_CRT											15
+#define ATOM_ENCODER_MODE_DVO											16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:2;
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucReserved:1;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:1;
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+  UCHAR ucAction;                                       
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucStatus;           // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK	  0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1				    0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2				    0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3				    0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS    0x0e
+#define ATOM_ENCODER_CMD_SETUP                        0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE             0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
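+
+/* Illustrative sketch (not part of the original header): for DP the encoder
+ * control table is invoked repeatedly with the command codes above, roughly
+ * in the order below. 'atom_exec' is a hypothetical stand-in for the
+ * command-table executor; the exact sequence is driver policy.
+ */
+#if 0	/* example only, never compiled */
+static void atom_dp_train(DIG_ENCODER_CONTROL_PARAMETERS_V2 *p)
+{
+	static const UCHAR seq[] = {
+		ATOM_ENCODER_CMD_SETUP,
+		ATOM_ENCODER_CMD_DP_LINK_TRAINING_START,
+		ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1,
+		ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2,
+		ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE,
+		ATOM_ENCODER_CMD_DP_VIDEO_ON,
+	};
+	int i;
+
+	for (i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++) {
+		p->ucAction = seq[i];
+		/* atom_exec(p); */
+	}
+}
+#endif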
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:3;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:3;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER					  0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// starting from NI
+// The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz    <= Changed compared to the previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz    <= Changed compared to the previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ		  0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ		  0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER					  0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER					  0x60
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10 kHz; for BIOS convenience
+  union{
+  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;           // HPD ID (1-6). =0 means to skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor: 
+#define PANEL_BPC_UNDEFINE                               0x00
+#define PANEL_6BIT_PER_COLOR                             0x01 
+#define PANEL_8BIT_PER_COLOR                             0x02
+#define PANEL_10BIT_PER_COLOR                            0x03
+#define PANEL_12BIT_PER_COLOR                            0x04
+#define PANEL_16BIT_PER_COLOR                            0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE                   0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE                  0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE                  0x11
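+
+/* Illustrative sketch (not part of the original header): filling the V4
+ * encoder parameters for an 8-bpc, 4-lane DP setup call on DIG0. All
+ * values are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void atom_fill_dig_encoder_v4(DIG_ENCODER_CONTROL_PARAMETERS_V4 *p)
+{
+	p->usPixelClock = 27000;		/* 270 MHz, 10 kHz units */
+	p->ucConfig = ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER |
+		      ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+	p->ucAction = ATOM_ENCODER_CMD_SETUP;
+	p->ucEncoderMode = ATOM_ENCODER_MODE_DP;
+	p->ucLaneNum = 4;
+	p->ucBitPerColor = PANEL_8BIT_PER_COLOR;
+	p->ucHPD_ID = 1;
+}
+#endif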
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable
+//                    LVTMATransmitterControlTable
+//                    DVOOutputControlTable
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE
+{
+  UCHAR ucLaneSel;
+  UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+	union
+	{
+  USHORT usPixelClock;		// in 10 kHz; for BIOS convenience
+  USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in ObjectID.h
+  ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+	};
+  UCHAR ucConfig;
+                          // [0]=0: 4 lane Link,
+                          //    =1: 8 lane Link ( Dual Links TMDS )
+                          // [1]=0: InCoherent mode
+                          //    =1: Coherent Mode
+                          // [2] Link Select:
+                          //    =0: PHY linkA   if bfLanes<3
+                          //    =1: PHY linkB   if bfLanes<3
+                          //    =0: PHY linkA+B if bfLanes=3
+                          // [5:4] PCIE lane Sel
+                          //    =0: lane 0~3 or 0~7
+                          //    =1: lane 4~7
+                          //    =2: lane 8~11 or 8~15
+                          //    =3: lane 12~15
+	UCHAR ucAction;				  // =0: turn off encoder					
+	                        // =1: turn on encoder			
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION		DIG_TRANSMITTER_CONTROL_PARAMETERS					
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK	0x00ff			
+
+//ucConfig 
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK			0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT				0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK		0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA						0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB						0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B					0x00			
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A					0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK	0x08			// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER		0x00				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER		0x08				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK			0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL			0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE			0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN		0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK		0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7				0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15			0xc0
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE					       0
+#define ATOM_TRANSMITTER_ACTION_ENABLE					       1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF				       2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON				       3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START		 5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP			 6
+#define ATOM_TRANSMITTER_ACTION_INIT						       7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT	       8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT		       9
+#define ATOM_TRANSMITTER_ACTION_SETUP						       10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON               12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF              13
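+
+/* Illustrative sketch (not part of the original header): a transmitter is
+ * typically brought up by walking through a subset of the actions above,
+ * e.g. INIT -> SETUP -> ENABLE -> ENABLE_OUTPUT, and torn down in reverse
+ * with DISABLE_OUTPUT -> DISABLE. 'atom_exec' is a hypothetical executor.
+ */
+#if 0	/* example only, never compiled */
+static void atom_transmitter_up(DIG_TRANSMITTER_CONTROL_PARAMETERS *p)
+{
+	static const UCHAR seq[] = {
+		ATOM_TRANSMITTER_ACTION_INIT,
+		ATOM_TRANSMITTER_ACTION_SETUP,
+		ATOM_TRANSMITTER_ACTION_ENABLE,
+		ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT,
+	};
+	int i;
+
+	for (i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++) {
+		p->ucAction = seq[i];
+		/* atom_exec(p); */
+	}
+}
+#endif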
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucReserved:1;               
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: non-DP connector
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: non-DP connector
+  UCHAR ucReserved:1;               
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER		          0x00				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER		          0x08				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR			        0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3           	0x80	//EF
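+
+/* Illustrative sketch (not part of the original header): composing a V2
+ * ucConfig byte from the bit definitions above, here for a coherent
+ * single-link connector on link A, DIG1 encoder, transmitter 1.
+ */
+#if 0	/* example only, never compiled */
+static const UCHAR example_cfg_v2 = ATOM_TRANSMITTER_CONFIG_V2_COHERENT |
+				    ATOM_TRANSMITTER_CONFIG_V2_LINKA |
+				    ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER |
+				    ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1;
+#endif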
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+	union
+	{
+  USHORT usPixelClock;		// in 10 kHz; for BIOS convenience
+  USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in ObjectID.h
+  ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+	};
+  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+  UCHAR ucAction;				  // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+	union
+	{
+    USHORT usPixelClock;		// in 10 kHz; for BIOS convenience
+    USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+	};
+  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+  UCHAR ucAction;				    // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER		          0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER		          0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL          		        0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL		                  0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT            0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           	0x80	//EF
+
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+ 	union
+ 	{  
+ 	  UCHAR ucLaneSet;
+ 	  struct {
+#if ATOM_BIG_ENDIAN
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+ 		};
+ 	}; 
+}ATOM_DP_VS_MODE_V4;
+ 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;		// in 10 kHz; for BIOS convenience
+    USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode     Redefined compared to the previous version
+  };
+  union
+  {
+  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;				    // define as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR			0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT				          0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA  			            0x00			
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB				            0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER		          0x00				 
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER		          0x08				
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL		                0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL		                0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3           	0x80	//EF
+
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;		        // Encoder clock in 10 kHz: (DP mode) = link clock/10, (TMDS/LVDS/HDMI) = pixel clock, (HDMI deep color) = pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;                   // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
+  UCHAR  ucAction;				    // define as ATOM_TRANSMITTER_ACTION_xxx
+  UCHAR  ucLaneNum;                 // indicate lane number 1-8
+  UCHAR  ucConnObjId;               // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;                 // indicate DIG mode
+  union{
+  ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR  ucDigEncoderSel;           // indicate DIG front end encoder 
+  UCHAR  ucDPLaneSet;
+  UCHAR  ucReserved;
+  UCHAR  ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
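+
+/* Illustrative sketch (not part of the original header): usSymClock follows
+ * the comment above, e.g. a 2.7 GHz DP link (270000 in 10 kHz units) yields
+ * a 270 MHz symbol clock (27000). The helper name is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static USHORT atom_dp_sym_clock(ULONG link_rate_10khz)
+{
+	return (USHORT)(link_rate_10khz / 10);	/* DP: link clock / 10 */
+}
+#endif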
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                                 0  
+#define ATOM_PHY_ID_UNIPHYB                                 1
+#define ATOM_PHY_ID_UNIPHYC                                 2
+#define ATOM_PHY_ID_UNIPHYD                                 3
+#define ATOM_PHY_ID_UNIPHYE                                 4
+#define ATOM_PHY_ID_UNIPHYF                                 5
+#define ATOM_PHY_ID_UNIPHYG                                 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS                    1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO                    4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST                  5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                               0x00
+#define DP_LANE_SET__0DB_0_6V                               0x01
+#define DP_LANE_SET__0DB_0_8V                               0x02
+#define DP_LANE_SET__0DB_1_2V                               0x03
+#define DP_LANE_SET__3_5DB_0_4V                             0x08  
+#define DP_LANE_SET__3_5DB_0_6V                             0x09
+#define DP_LANE_SET__3_5DB_0_8V                             0x0a
+#define DP_LANE_SET__6DB_0_4V                               0x10
+#define DP_LANE_SET__6DB_0_6V                               0x11
+#define DP_LANE_SET__9_5DB_0_4V                             0x18  
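+
+/* Illustrative sketch (not part of the original header): the values above
+ * suggest ucDPLaneSet packs the pre-emphasis level in bits [4:3] and the
+ * voltage-swing level in bits [1:0], e.g. (1 << 3) | 1 ==
+ * DP_LANE_SET__3_5DB_0_6V. The helper name is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static UCHAR atom_dp_lane_set(UCHAR preemph_level, UCHAR vswing_level)
+{
+	return (UCHAR)((preemph_level << 3) | vswing_level);
+}
+#endif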
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT				          0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 	        0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT		    0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL		                0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL		                0x08   
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT           0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK		          0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT		      0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL				        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL				          0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL				          0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL				          0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL				          0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL				          0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL				          0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
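+
+// Illustrative sketch (not part of the VBIOS ABI): how a caller might fill
+// DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 for a DP link, using the ucConfig
+// bit definitions above. Field values are examples; real drivers pick the
+// action from the ATOM_TRANSMITTER_ACTION_xxx values defined earlier in this
+// file and execute the table through their AtomBIOS interpreter.
+//
+//   DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 args = {0};
+//
+//   args.usSymClock = 27000;                  /* 2.7GHz link clock / 10, in 10kHz units */
+//   args.ucPhyId    = ATOM_PHY_ID_UNIPHYA;
+//   args.ucAction   = 0;                      /* e.g. an ATOM_TRANSMITTER_ACTION_xxx value */
+//   args.ucLaneNum  = 4;
+//   args.ucDigMode  = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+//   args.ucConfig   = ATOM_TRANSMITTER_CONFIG_V5_P1PLL |
+//                     ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+//   args.ucDigEncoderSel = ATOM_TRANMSITTER_V5__DIGA_SEL;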
+
+
+/****************************************************************************/	
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT 
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucAction;          // 
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncoderMode = DP
+  UCHAR  ucReserved;        
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP              0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ		  0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK		    0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1		            0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2		            0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3		            0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
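+
+// Illustrative sketch (not part of the VBIOS ABI): a SETUP call for an
+// external DP encoder might be populated like this, per the ucAction and
+// ucConfig definitions above. Values shown are examples only.
+//
+//   EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 ps = {0};
+//
+//   ps.sExtEncoder.usPixelClock  = 16200;     /* 162MHz in 10kHz units */
+//   ps.sExtEncoder.ucAction      = EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP;
+//   ps.sExtEncoder.ucConfig      = EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ |
+//                                  EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+//   ps.sExtEncoder.ucLaneNum     = 4;
+//   ps.sExtEncoder.ucBitPerColor = 8;         /* example value for "bits per color" */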
+
+
+/****************************************************************************/	
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DCE30)
+//                    TMDSAOutputControlTable  (Before DCE30)
+/****************************************************************************/	
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR  ucAction;                    // Possible input: ATOM_ENABLE || ATOM_DISABLE
+                                      // When the display is LCD, in addition to the above:
+                                      // ATOM_LCD_BLOFF || ATOM_LCD_BLON || ATOM_LCD_BL_BRIGHTNESS_CONTROL ||
+                                      // ATOM_LCD_SELFTEST_START || ATOM_LCD_SELFTEST_STOP
+
+  UCHAR  aucPadding[3];               // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3	 DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by BlankCRTCTable
+/****************************************************************************/	
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+  UCHAR  ucCRTC;                    	// ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucBlanking;                  // ATOM_BLANKING or ATOM_BLANKINGOFF
+  USHORT usBlackColorRCr;
+  USHORT usBlackColorGY;
+  USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION    BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by EnableCRTCTable
+//                    EnableCRTCMemReqTable
+//                    UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/	
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE 
+  UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION   ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+  USHORT usOverscanRight;             // right
+  USHORT usOverscanLeft;              // left
+  USHORT usOverscanBottom;            // bottom
+  USHORT usOverscanTop;               // top
+  UCHAR  ucCRTC;                      // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+  UCHAR ucH_Replication;              // horizontal replication
+  UCHAR ucV_Replication;              // vertical replication
+  UCHAR usCRTC;                       // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/	
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucDevice;                     // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+  UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION  SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEncoderID;                  // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+  UCHAR ucEncodeMode;									// Encoding mode, only valid when using DIG1/DIG2/DVO
+  UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID    						0x00 
+//#define ASIC_INT_TV_ENCODER_ID									0x02
+//#define ASIC_INT_DIG1_ENCODER_ID								0x03
+//#define ASIC_INT_DAC2_ENCODER_ID								0x04
+//#define ASIC_EXT_TV_ENCODER_ID									0x06
+//#define ASIC_INT_DVO_ENCODER_ID									0x07
+//#define ASIC_INT_DIG2_ENCODER_ID								0x09
+//#define ASIC_EXT_DIG_ENCODER_ID									0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP										0
+//#define ATOM_ENCODER_MODE_LVDS									1
+//#define ATOM_ENCODER_MODE_DVI										2
+//#define ATOM_ENCODER_MODE_HDMI									3
+//#define ATOM_ENCODER_MODE_SDVO									4
+//#define ATOM_ENCODER_MODE_TV										13
+//#define ATOM_ENCODER_MODE_CV										14
+//#define ATOM_ENCODER_MODE_CRT										15
+
+/****************************************************************************/	
+// Structures used by SetPixelClockTable
+//                    GetPixelClockTable 
+/****************************************************************************/	
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1, Minor revision=2, adds ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK        0xF0
+#define MISC_DEVICE_INDEX_SHIFT       4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucMiscInfo;                  // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1, Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV										13
+//ATOM_ENCODER_MODE_CV										14
+//ATOM_ENCODER_MODE_CRT										15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL							0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED						0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED						0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL						0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT							0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT						0x04
+//#define DVO_ENCODER_CONFIG_24BIT								0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL						0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE										0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK							0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1							0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2							0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK			0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC                    0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL. For VGA PPLL, make sure this value is not 0.
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucTransmitterId;             // graphic encoder id defined in objectId.h
+	union
+	{
+  UCHAR  ucEncoderMode;               // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI
+	UCHAR  ucDVOConfig;									// when using DVO, need to know SDR/DDR, 12bit or 24bit
+	};
+  UCHAR  ucMiscInfo;                  // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+                                      // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+                                      // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST			PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION		PIXEL_CLOCK_PARAMETERS_LAST
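+
+// Illustrative sketch (not part of the VBIOS ABI): the divider fields above
+// relate to the output clock by the formula given in the comments,
+// PixelClock = (RefClk * FB_Div) / (Ref_Div * Post_Div). A hypothetical
+// helper, with every clock quantity in 10kHz units:
+//
+//   static ULONG pclk_from_dividers(ULONG refclk_10khz,
+//                                   USHORT fb_div, USHORT ref_div, UCHAR post_div)
+//   {
+//       return (refclk_10khz * fb_div) / ((ULONG)ref_div * post_div);
+//   }
+//
+//   e.g. a 27MHz reference (2700) with fb_div=100, ref_div=10, post_div=2
+//   yields 13500, i.e. a 135MHz pixel clock.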
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+  UCHAR  ucCRTC;             // ATOM_CRTC1~6, indicate the CRTC controller to 
+                             // drive the pixel clock. not used for DCPLL case.
+  union{
+  UCHAR  ucReserved;
+  UCHAR  ucFracFbDiv;        // [gphan] temporary to prevent build problem.  remove it after driver code is changed.
+  };
+  USHORT usPixelClock;       // target the pixel clock to drive the CRTC timing
+                             // 0 means disable PPLL/DCPLL. 
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTALIN (default mode)
+                             // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depending on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP              0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+  ULONG  ulPixelClock:24;     // target pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
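+
+// Illustrative sketch (not part of the VBIOS ABI): CRTC_PIXEL_CLOCK_FREQ
+// packs a 24-bit pixel clock (10kHz units) and an 8-bit CRTC id into one
+// ULONG, with the bitfield order flipped for big-endian builds so the
+// in-memory layout matches what the VBIOS expects. It is embedded as the
+// first union member of PIXEL_CLOCK_PARAMETERS_V6 below. Example values:
+//
+//   CRTC_PIXEL_CLOCK_FREQ freq = {0};
+//
+//   freq.ulPixelClock = 14850;   /* 148.5MHz in 10kHz units */
+//   freq.ucCRTC       = 0;       /* e.g. ATOM_CRTC1 */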
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTALIN (default mode)
+                             // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depending on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct definition for 36bpp should be 2 for 36bpp (2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct definition for 30bpp should be 1 for 30bpp (5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
+#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
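+
+// Illustrative sketch (not part of the VBIOS ABI): note that the 30bpp and
+// 36bpp encodings are swapped between the legacy and the corrected V6
+// definitions above. A hypothetical helper that always uses the corrected
+// V6 values:
+//
+//   static UCHAR v6_misc_hdmi_bpp(int bpp)
+//   {
+//       switch (bpp) {
+//       case 30: return PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;  /* 0x04 */
+//       case 36: return PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;  /* 0x08 */
+//       case 48: return PIXEL_CLOCK_V6_MISC_HDMI_48BPP;     /* 0x0c */
+//       default: return PIXEL_CLOCK_V6_MISC_HDMI_24BPP;     /* 0x00 */
+//       }
+//   }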
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+  PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+  UCHAR  ucStatus;
+  UCHAR  ucRefDivSrc;                 // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+  UCHAR  ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+  PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/	
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/	
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+	USHORT usPixelClock;
+	UCHAR ucTransmitterID;
+	UCHAR ucEncodeMode;
+	union
+	{
+		UCHAR ucDVOConfig;									//if DVO, need to pass link rate and output: low 12 bits or 24 bits
+		UCHAR ucConfig;											//if non-DVO, not defined yet
+	};
+	UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION			ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+	USHORT usPixelClock;                    // target pixel clock
+	UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
+	UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+  UCHAR ucDispPllConfig;                 // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
+  UCHAR ucExtTransmitterID;               // external encoder id.
+	UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL                0x0001     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED               0x0000     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED               0x0001     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL              0x000c     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT                0x0000     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT              0x0004     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT                   0x0008     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE                   0x0010     // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE               0x0020     // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK                   0x0040     // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+  ULONG ulDispPllFreq;                 // returned display PPLL freq used to generate the pixclock, and related idclk, symclk etc
+  UCHAR ucRefDiv;                      // if non-zero, used to calculate the other PPLL parameters fb_divider and post_div (if they are not given)
+  UCHAR ucPostDiv;                     // if non-zero, used to calculate the other PPLL parameter fb_divider
+  UCHAR ucReserved[2];  
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+  union 
+  {
+    ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3  sInput;
+    ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+  };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
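+
+// Illustrative sketch (not part of the VBIOS ABI): the PS allocation is an
+// in/out union -- the caller fills sInput, the table overwrites the same
+// storage, and the caller then reads sOutput. Roughly (the exec call is a
+// placeholder for the driver's AtomBIOS interpreter; values are examples):
+//
+//   ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 ps = {0};
+//
+//   ps.sInput.usPixelClock    = 14850;      /* 148.5MHz in 10kHz units */
+//   ps.sInput.ucEncodeMode    = 3;          /* e.g. HDMI, per ATOM_ENCODER_MODE_xxx */
+//   ps.sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE;
+//   /* atom_execute_table(ctx, index, (ULONG *)&ps);  -- driver-specific */
+//   /* ps.sOutput.ulDispPllFreq now holds the adjusted PPLL frequency    */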
+
+/****************************************************************************/	
+// Structures used by EnableYUVTable
+/****************************************************************************/	
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+  UCHAR ucCRTC;                       // Which CRTC needs this YUV or RGB format
+  UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetMemoryClockTable
+/****************************************************************************/	
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulReturnMemoryClock;          // current memory speed in 10KHz unit
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetEngineClockTable
+/****************************************************************************/	
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulReturnEngineClock;          // current engine speed in 10KHz unit
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// The following structures and constants may be obsolete
+/****************************************************************************/	
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//Read operation is successful when the parameter space is non-zero; otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
+  USHORT    usStatus;           //When used as output: lower byte EDID checksum, high byte hardware status
+                                //When used as input:  lower byte as 'bytes to read'; currently limited to 128 bytes or 1 byte
+  UCHAR     ucSlaveAddr;        //Read from which slave
+  UCHAR     ucLineNumber;       //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+
+
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
+#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usByteOffset;       //Write to which byte
+                                //Upper portion of usByteOffset is Format of data 
+                                //1bytePS+offsetPS
+                                //2bytesPS+offsetPS
+                                //blockID+offsetPS
+                                //blockID+offsetID
+                                //blockID+counterID+offsetID
+  UCHAR     ucData;             //PS data1
+  UCHAR     ucStatus;           //Status byte: 1=success, 2=failure; also used as PS data2
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/	
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/	
+typedef struct	_POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0=detected, 1=not detected
+	UCHAR   ucPwrBehaviorId;							
+	USHORT	usPwrBudget;								 //how much power is currently booted to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{                               
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0=detected, 1=not detected
+	UCHAR   ucReserved;
+	USHORT	usPwrBudget;								 //how much power is currently booted to, in units of watts
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/	
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/	
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS
+{
+  USHORT  usSpreadSpectrumPercentage;       
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStepSize_Delay; //bits [3:2] SS_STEP_SIZE; bits [6:4] SS_DELAY
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS_V2
+{
+  USHORT  usSpreadSpectrumPercentage;       
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct	_ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPpll;												  // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
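+
+// Illustrative sketch (not part of the VBIOS ABI): usSpreadSpectrumAmount
+// carries SS_AMOUNT_FBDIV in bits [7:0] and SS_AMOUNT_NFRAC_SLIP in bits
+// [11:8], per the masks above. A hypothetical packing helper:
+//
+//   static USHORT ppll_ss_amount(UCHAR fbdiv_amount, UCHAR nfrac_slip)
+//   {
+//       return (USHORT)(((USHORT)fbdiv_amount << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) |
+//               (((USHORT)nfrac_slip << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+//                ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
+//   }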
+
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+    
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL            ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+  PIXEL_CLOCK_PARAMETERS sPCLKInput;
+  ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion 
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct	_MEMORY_TRAINING_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/	
+// Structures used by LVDSEncoderControlTable   (Before DCE30)
+//                    LVTMAEncoderControlTable  (Before DCE30)
+//                    TMDSAEncoderControlTable  (Before DCE30)
+/****************************************************************************/	
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // bit0=0: Enable single link
+                        //     =1: Enable dual link
+                        // Bit1=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+  UCHAR  ucTruncate;    // bit0=0: Disable truncate
+                        //     =1: Enable truncate
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucSpatial;     // bit0=0: Disable spatial dithering
+                        //     =1: Enable spatial dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucTemporal;    // bit0=0: Disable temporal dithering
+                        //     =1: Enable temporal dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+                        // bit5=0: Gray level 2
+                        //     =1: Gray level 4
+  UCHAR  ucFRC;         // bit4=0: 25FRC_SEL pattern E
+                        //     =1: 25FRC_SEL pattern F
+                        // bit6:5=0: 50FRC_SEL pattern A
+                        //       =1: 50FRC_SEL pattern B
+                        //       =2: 50FRC_SEL pattern C
+                        //       =3: 50FRC_SEL pattern D
+                        // bit7=0: 75FRC_SEL pattern E
+                        //     =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+  
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{                               
+  UCHAR    ucEnable;            // Enable or Disable External TMDS encoder
+  UCHAR    ucMisc;              // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+  UCHAR    ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+  DIG_ENCODER_CONTROL_PARAMETERS            sDigEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/	
+//ucTableFormatRevision=1,ucTableContentRevision=3
+
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL							0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED						0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED						0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL						0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT							0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT						0x04
+#define DVO_ENCODER_CONFIG_24BIT								0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock; 
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3	DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+{
+  USHORT usPixelClock; 
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucBitPerColor;                       //please refer to definition of PANEL_xBIT_PER_COLOR
+  UCHAR  ucReseved[3];
+}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4	DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: structure is not changed, but ucMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+//     =1: coherent mode
+
+//==========================================================================================
+//Next time the encoder parameter definitions change, only change them here!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL                0x01
+#define PANEL_ENCODER_MISC_COHERENT            0x02
+#define	PANEL_ENCODER_MISC_TMDS_LINKB					 0x04
+#define	PANEL_ENCODER_MISC_HDMI_TYPE					 0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE           ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE            ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ       (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN              0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH           0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN        0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH     0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN       0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH    0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4         0x20
+#define PANEL_ENCODER_25FRC_MASK               0x10
+#define PANEL_ENCODER_25FRC_E                  0x00
+#define PANEL_ENCODER_25FRC_F                  0x10
+#define PANEL_ENCODER_50FRC_MASK               0x60
+#define PANEL_ENCODER_50FRC_A                  0x00
+#define PANEL_ENCODER_50FRC_B                  0x20
+#define PANEL_ENCODER_50FRC_C                  0x40
+#define PANEL_ENCODER_50FRC_D                  0x60
+#define PANEL_ENCODER_75FRC_MASK               0x80
+#define PANEL_ENCODER_75FRC_E                  0x00
+#define PANEL_ENCODER_75FRC_F                  0x80
+
+/****************************************************************************/	
+// Structures used by SetVoltageTable
+/****************************************************************************/	
+#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
+#define SET_VOLTAGE_INIT_MODE                  5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE            6					//Gets the Max. voltage for the soldered Asic
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4
+
+#define	SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
+#define	SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
+#define	SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2
+
+typedef struct	_SET_VOLTAGE_PARAMETERS
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // To set all, to set source A or source B or ...
+  UCHAR    ucVoltageIndex;              // An index to tell which voltage level
+  UCHAR    ucReserved;          
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct	_SET_VOLTAGE_PARAMETERS_V2
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // Not used; may be used for a state machine for different power modes
+  USHORT   usVoltageLevel;              // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+// used by both SetVoltageTable v1.3 and v1.4
+typedef struct	_SET_VOLTAGE_PARAMETERS_V1_3
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Indicate action: Set voltage level
+  USHORT   usVoltageLevel;              // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC                    1
+#define VOLTAGE_TYPE_MVDDC                   2
+#define VOLTAGE_TYPE_MVDDQ                   3
+#define VOLTAGE_TYPE_VDDCI                   4
+
+//SET_VOLTAGE_PARAMETERS_V1_3.ucVoltageMode
+#define ATOM_SET_VOLTAGE                     0        //Set voltage Level
+#define ATOM_INIT_VOLTAGE_REGULATOR          3        //Init Regulator
+#define ATOM_SET_VOLTAGE_PHASE               4        //Set Vregulator Phase, only for SVID/PVID regulator
+#define ATOM_GET_MAX_VOLTAGE                 6        //Get Max Voltage, not used from SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL               6        //Get Voltage level from virtual voltage ID, not used for SetVoltage v1.4
+#define ATOM_GET_LEAKAGE_ID                  8        //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4 
+
+// define virtual voltage id in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1             0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2             0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3             0xff04
+#define ATOM_VIRTUAL_VOLTAGE_ID4             0xff05
+#define ATOM_VIRTUAL_VOLTAGE_ID5             0xff06
+#define ATOM_VIRTUAL_VOLTAGE_ID6             0xff07
+#define ATOM_VIRTUAL_VOLTAGE_ID7             0xff08
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+  SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
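+
+// Illustrative sketch (not part of the VBIOS ABI): a v1.1 SetVoltage request
+// might be filled like this; the caller does not need to initialize the
+// trailing sReserved work area. The index value is an example only.
+//
+//   SET_VOLTAGE_PS_ALLOCATION ps = {0};
+//
+//   ps.sASICSetVoltage.ucVoltageType  = SET_VOLTAGE_TYPE_ASIC_VDDC;
+//   ps.sASICSetVoltage.ucVoltageMode  = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+//   ps.sASICSetVoltage.ucVoltageIndex = 2;   /* example voltage level index */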
+
+// New Added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id 
+  ULONG    ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct  _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  ULONG    ulVotlageGpioState;
+  ULONG    ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct  _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  USHORT   usVoltageLevel;
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define	ATOM_GET_VOLTAGE_VID                0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
+#define ATOM_GET_VOLTAGE_SVID2              0x07        //Get SVI2 Regulator Info
+
+// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
+#define	ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+
+#define	ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define	ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id 
+  ULONG    ulSCLKFreq;                  // Input: when ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE, the DPM state SCLK frequency, defined in the PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE        0x09        
+
+// New Added from CI Hawaii for EVV feature 
+typedef struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+  USHORT   usVoltageLevel;                               // real voltage level in unit of mv
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
+/****************************************************************************/	
+// Structures used by TVEncoderControlTable
+/****************************************************************************/	
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10KHz; for BIOS convenience
+  UCHAR  ucTvStandard;                // See definition "ATOM_TV_NTSC ..."
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+  TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;          
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+/****************************************************************************/	
+// Structure used in Data.mtb
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+  USHORT        UtilityPipeLine;	        // Offset for the utility to get parser info; don't change this position!
+  USHORT        MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios 
+  USHORT        MultimediaConfigInfo;     // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
+  USHORT        StandardVESA_Timing;      // Only used by Bios
+  USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
+  USHORT        PaletteData;              // Only used by BIOS
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
+  USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
+  USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
+  USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
+  USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600           
+  USHORT        VRAM_UsageByFirmware;     // Shared by various SW components,latest version 1.3 will be used from R600
+  USHORT        GPIO_Pin_LUT;             // Shared by various SW components,latest version 1.1
+  USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
+  USHORT        ComponentVideoInfo;       // Shared by various SW components,latest version 2.1 will be used from R600
+  USHORT        PowerPlayInfo;            // Shared by various SW components,latest version 2.1,new design from R600
+  USHORT        CompassionateData;        // Will be obsolete from R600
+  USHORT        SaveRestoreInfo;          // Only used by Bios
+  USHORT        PPLL_SS_Info;             // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+  USHORT        OemInfo;                  // Defined and used by external SW, should be obsolete soon
+  USHORT        XTMDS_Info;               // Will be obsolete from R600
+  USHORT        MclkSS_Info;              // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+  USHORT        Object_Header;            // Shared by various SW components,latest version 1.1
+  USHORT        IndirectIOAccess;         // Only used by Bios; this table position can't change at all!!
+  USHORT        MC_InitParameter;         // Only used by command table
+  USHORT        ASIC_VDDC_Info;						// Will be obsolete from R600
+  USHORT        ASIC_InternalSS_Info;			// New table name from R600, used to be called "ASIC_MVDDC_Info"
+  USHORT        TV_VideoMode;							// Only used by command table
+  USHORT        VRAM_Info;								// Only used by command table, latest version 1.3
+  USHORT        MemoryTrainingInfo;				// Used by VBIOS and Diag utility for memory training purposes since R600. The new table rev starts from 2.1
+  USHORT        IntegratedSystemInfo;			// Shared by various SW components
+  USHORT        ASIC_ProfilingInfo;				// New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+  USHORT        VoltageObjectInfo;				// Shared by various SW components, latest version 1.1
+	USHORT				PowerSourceInfo;					// Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
+// For backward compatible 
+#define LVDS_Info                LCD_Info
+#define DAC_Info                 PaletteData
+#define TMDS_Info                DIGTransmitterInfo
+
+/****************************************************************************/	
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ULONG                    ulSignature;      // HW info table signature string "$ATI"
+  UCHAR                    ucI2C_Type;       // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+  UCHAR                    ucTV_OutInfo;     // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+  UCHAR                    ucVideoPortInfo;  // Provides the video port capabilities
+  UCHAR                    ucHostPortInfo;   // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/	
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG                    ulSignature;      // MM info table signature string "$MMT"
+  UCHAR                    ucTunerInfo;      // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+  UCHAR                    ucAudioChipInfo;  // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+  UCHAR                    ucProductID;      // Defines as OEM ID or ATI board ID dependent on product type setting
+  UCHAR                    ucMiscInfo1;      // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+  UCHAR                    ucMiscInfo2;      // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+  UCHAR                    ucMiscInfo3;      // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+  UCHAR                    ucMiscInfo4;      // Video Decoder Host Config (2:0) reserved (7:3)
+  UCHAR                    ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/	
+// Structures used in FirmwareInfoTable
+/****************************************************************************/	
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: BIOS image is not posted; = 1: BIOS image is posted
+// Bit 1 = 0: Dual CRTC is not supported; = 1: Dual CRTC is supported
+// Bit 2 = 0: Extended Desktop is not supported; = 1: Extended Desktop is supported
+// Others: Reserved
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008		// (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable. 
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010		// (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable. 
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008		// (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010		// (valid from v2.1 ): =1: engclk ss enable with external ss chip
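+// A minimal sketch (not part of the original ATOM interface) of how the raw
+// usBIOSCapability word can be tested against the masks above; the helper
+// names are illustrative, and the USHORT/UCHAR typedefs from earlier in this
+// file are assumed.
+static inline int atom_bios_info_is_posted(USHORT usBIOSCapability)
+{
+  return (usBIOSCapability & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) != 0;
+}
+
+static inline UCHAR atom_bios_info_hypermemory_size(USHORT usBIOSCapability)
+{
+  // HyperMemory size occupies bits [12:9], per the 0x1E00 mask above.
+  return (UCHAR)((usBIOSCapability & ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK) >> 9);
+}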
+
+#ifndef _H2INC
+
+// Please don't add to or expand this bitfield structure below; it will be retired soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:1;
+  USHORT SCL2Redefined:1;
+  USHORT PostWithoutModeSet:1;
+  USHORT HyperMemory_Size:4;
+  USHORT HyperMemory_Support:1;
+  USHORT PPMode_Assigned:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT GPUControlsBL:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT DualCRTC_Support:1;
+  USHORT FirmwarePosted:1;
+#else
+  USHORT FirmwarePosted:1;
+  USHORT DualCRTC_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT GPUControlsBL:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT PPMode_Assigned:1;
+  USHORT HyperMemory_Support:1;
+  USHORT HyperMemory_Size:4;
+  USHORT PostWithoutModeSet:1;
+  USHORT SCL2Redefined:1;
+  USHORT Reserved:1;
+#endif
+}ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  ATOM_FIRMWARE_CAPABILITY sbfAccess;
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
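+
+#ifndef _H2INC
+// A minimal sketch (illustrative only): the union above allows either
+// whole-word or per-bit access to the same capability data; the bitfield
+// member only exists when this header is not processed through _H2INC, and
+// the ATOM_BIG_ENDIAN selection keeps the bit positions consistent with the
+// ATOM_BIOS_INFO_* masks. The helper name is hypothetical.
+static inline int atom_fw_cap_dual_crtc(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
+{
+  // Equivalent to (cap.susAccess & ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT) != 0.
+  return cap.sbfAccess.DualCRTC_Support;
+}
+#endif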
+
+typedef struct _ATOM_FIRMWARE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucPadding[3];               //Don't use them
+  ULONG                           aulReservedForBIOS[3];      //Don't use them
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS[2];      //Don't use them
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS;         //Don't use them
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+}ATOM_FIRMWARE_INFO_V1_4;
+
+// The structure below is used from Cypress onward
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved1;
+  ULONG                           ulReserved2;
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit
+  UCHAR                           ucReserved1;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit; if 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread-spectrum clock
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+  UCHAR                           ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+// The structure below is used from NI onward
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulSPLL_OutputFreq;          //In 10Khz unit  
+  ULONG                           ulGPUPLL_OutputFreq;        //In 10Khz unit
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  UCHAR                           ucRemoteDisplayConfig;
+  UCHAR                           ucReserved5[3];             //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit; if 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread-spectrum clock
+  UCHAR                           ucMemoryModule_ID;          //Indicates the memory module used on the board
+  UCHAR                           ucReserved9[3];
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usReserved12;
+  ULONG                           ulReserved10[3];            // Newly added compared to the previous version
+}ATOM_FIRMWARE_INFO_V2_2;
+
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
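+
+// A minimal sketch (illustrative only) of version dispatch: callers select
+// the layout from the common table header's revision fields (declared
+// earlier in this file) before touching version-specific members. The
+// helper name is hypothetical.
+static inline ULONG atom_fw_default_engine_clock(ATOM_COMMON_TABLE_HEADER *hdr)
+{
+  // ulDefaultEngineClock sits at the same offset in every revision above,
+  // so the oldest layout is a safe fallback; fields that moved between
+  // revisions would need a full format/content revision check.
+  if (hdr->ucTableFormatRevision == 2 && hdr->ucTableContentRevision >= 2)
+    return ((ATOM_FIRMWARE_INFO_V2_2 *)hdr)->ulDefaultEngineClock;
+  return ((ATOM_FIRMWARE_INFO *)hdr)->ulDefaultEngineClock;
+}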
+
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE                   0x00
+#define REMOTE_DISPLAY_ENABLE                    0x01
+
+/****************************************************************************/	
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/	
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
+#define IGP_CAP_FLAG_AC_CARD               0x4
+#define IGP_CAP_FLAG_SDVO_CARD             0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG	                          ulBootUpEngineClock;		    //in 10kHz unit
+  ULONG	                          ulBootUpMemoryClock;		    //in 10kHz unit
+  ULONG	                          ulMaxSystemMemoryClock;	    //in 10kHz unit
+  ULONG	                          ulMinSystemMemoryClock;	    //in 10kHz unit
+  UCHAR                           ucNumberOfCyclesInPeriodHi;
+  UCHAR                           ucLCDTimingSel;             //=0: not valid; !=0: select this timing descriptor from LCD EDID.
+  USHORT                          usReserved1;
+  USHORT                          usInterNBVoltageLow;        //An intermediate PWM value to set the voltage
+  USHORT                          usInterNBVoltageHigh;       //Another intermediate PWM value to set the voltage
+  ULONG	                          ulReserved[2];
+
+  USHORT	                        usFSBClock;			            //In MHz unit
+  USHORT                          usCapabilityFlag;           //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
+                                                              //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+                                                              //Bit[4]==1: P/2 mode, ==0: P/1 mode
+  USHORT	                        usPCIENBCfgReg7;				    //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+  USHORT	                        usK8MemoryClock;            //in MHz unit
+  USHORT	                        usK8SyncStartDelay;         //in 0.01 us unit
+  USHORT	                        usK8DataReturnTime;         //in 0.01 us unit
+  UCHAR                           ucMaxNBVoltage;
+  UCHAR                           ucMinNBVoltage;
+  UCHAR                           ucMemoryType;					      //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+  UCHAR                           ucNumberOfCyclesInPeriod;		//CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod 
+  UCHAR                           ucStartingPWM_HighTime;     //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+  UCHAR                           ucHTLinkWidth;              //16 bit vs. 8 bit
+  UCHAR                           ucMaxNBVoltageHigh;    
+  UCHAR                           ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock:    For Intel IGP,it's the UMA system memory clock 
+                        For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 
+                        For AMD IGP,for now this can be 0
+
+usFSBClock:             For Intel IGP,it's FSB Freq 
+                        For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. 
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 
+
+ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
+
+
+/*
+The following IGP table is introduced from RS780. SBIOS is supposed to put it in FB before the IGP VBIOS starts VPOST;
+VBIOS will then copy the whole structure into its image so all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved that the table revision should never need to change; whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before.
+*/
+
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG	                     ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulReserved1[2];            //must be 0x0 for the reserved
+  ULONG	                     ulBootUpUMAClock;          //in 10kHz unit
+  ULONG	                     ulBootUpSidePortClock;     //in 10kHz unit
+  ULONG	                     ulMinSidePortClock;        //in 10kHz unit
+  ULONG                      ulReserved2[6];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //see explanation below
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulDDISlot1Config;
+  ULONG                      ulDDISlot2Config;
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  UCHAR                      ucDockingPinBit;
+  UCHAR                      ucDockingPinPolarity;
+  ULONG                      ulDockingPinCFGInfo;
+  ULONG                      ulCPUCapInfo;
+  USHORT                     usNumberOfCyclesInPeriod;
+  USHORT                     usMaxNBVoltage;
+  USHORT                     usMinNBVoltage;
+  USHORT                     usBootUpNBVoltage;
+  ULONG                      ulHTLinkFreq;              //in 10Khz
+  USHORT                     usMinHTLinkWidth;
+  USHORT                     usMaxHTLinkWidth;
+  USHORT                     usUMASyncStartDelay;
+  USHORT                     usUMADataReturnTime;
+  USHORT                     usLinkStatusZeroTime;
+  USHORT                     usDACEfuse;                //for storing the bandgap value (for RS880 only)
+  ULONG                      ulHighVoltageHTLinkFreq;     // in 10Khz
+  ULONG                      ulLowVoltageHTLinkFreq;      // in 10Khz
+  USHORT                     usMaxUpStreamHTLinkWidth;
+  USHORT                     usMaxDownStreamHTLinkWidth;
+  USHORT                     usMinUpStreamHTLinkWidth;
+  USHORT                     usMinDownStreamHTLinkWidth;
+  USHORT                     usFirmwareVersion;         //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+  USHORT                     usFullT0Time;             // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+  ULONG                      ulReserved3[96];          //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;   
+
+/*
+ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present. This could be equal to or less than the maximum supported Sideport memory clock.
+
+ulSystemConfig:  
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; 
+Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state.
+      =0: system boots up at driver control state. Power state depends on PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+      =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system. 
+      =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+      =0: CDLW is disabled. If CLMC is enabled, Min HT width will be set equal to Max HT width. If CLMC is disabled, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
+      =0: Voltage settings are determined by the powerplay table.
+Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and legacy C1E is used. This is a workaround for a CPU (Griffin) performance issue.
+      =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+      =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+      =0: DLL Shut Down feature is not enabled or supported on current system.
+
+ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8] - Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
+                    [7:0]  - Bootup TV standard selection; this is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+      [3:0]  - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+      [7:4]  - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+      When a DDI connector is not "paired" (meaning two mutually exclusive connections on chassis and docking, of which only one can be connected at a time),
+      SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: if
+      one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+      [15:8] - Lane configuration attribute;
+      [23:16]- Connector type, possible values:
+               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+               CONNECTOR_OBJECT_ID_DISPLAYPORT
+               CONNECTOR_OBJECT_ID_eDP
+      [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type; set it to 0x0 when Sideport memory is not installed. Driver needs this info to change the sideport memory clock. Not for display in CCC.
+For IGP, Hypermemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber:  how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 
+ucDockingPinBit:     which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. 
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+                    GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+                    PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+                    GPU SW doesn't control the voltage: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
+
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq:       Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth:   Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth:   Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.  
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation 
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation; set this to 0x0 for K8 CPU, and set a proper value in units of 0.01 us
+for Griffin or Greyhound. SBIOS needs to convert T0Ttime to actual time as follows (a conversion sketch follows this comment block):
+                     if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+                     if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+                     if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+                     if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq:     HT link frequency for the power state with high voltage. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHTLinkFreq (bootup frequency).
+ulLowVoltageHTLinkFreq:      HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth:  same as above.
+usMinUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth:  same as above.
+*/
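+
+// A minimal sketch (illustrative only) of the T0Ttime conversion described
+// above, returning usLinkStatusZeroTime in 0.01us units; the function name
+// is hypothetical (SBIOS performs this conversion).
+static inline USHORT atom_t0ttime_to_0_01us(UCHAR t0t)
+{
+  UCHAR mant = t0t & 0x0F;            // T0Ttime[3:0]
+  switch ((t0t >> 4) & 0x03) {        // T0Ttime[5:4] selects the step size
+  case 0:  return mant * 10;          // 0.1us steps (0.0 to 1.5us)
+  case 1:  return mant * 50;          // 0.5us steps (0.0 to 7.5us)
+  case 2:  return mant * 200;         // 2.0us steps (0.0 to 30us)
+  default: return mant * 2000;        // 20us steps, valid for mant <= 0xa
+  }
+}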
+
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI         5
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    // this define reflects the max defined CPU code
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE                  0x00000004 
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY         0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED                        0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED                        0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED              0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED            0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED                        0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED                0x00000200
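+
+// A minimal sketch (illustrative only): the voltage-control notes above map
+// directly onto these flags, e.g. deciding between PWM and GPIO NB voltage
+// control from ATOM_INTEGRATED_SYSTEM_INFO_V2::ulSystemConfig.
+static inline int atom_igp_uses_pwm_voltage(ULONG ulSystemConfig)
+{
+  return (ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
+}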
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK                     0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK                      0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK              0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3                    0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7                    0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11                   0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15                  0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK                       0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED                      0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED                    0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK                  0x00FF0000
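+
+// A minimal sketch (illustrative only) of unpacking a ulDDISlotXConfig value
+// per the layout documented above: [7:0] lane maps, [15:8] attributes,
+// [23:16] connector object id. The helper names are hypothetical.
+static inline UCHAR atom_ddi_slot_connector_type(ULONG ulDDISlotConfig)
+{
+  return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
+}
+
+static inline int atom_ddi_slot_lane_reversed(ULONG ulDDISlotConfig)
+{
+  return (ulDDISlotConfig & IGP_DDI_SLOT_CONFIG_REVERSED) != 0;
+}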
+
+// The new IntegratedSystemInfoTable revision after V2 is V5, because the real revision of V2 is v1.4. This rev is used for RR.
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG	                     ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulDentistVCOFreq;          //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK. 
+  ULONG                      ulLClockFreq;              //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
+  ULONG	                     ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulReserved1[8];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulReserved2[4];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //TBD
+  ULONG                      ulCPUCapInfo;              //TBD
+  USHORT                     usMaxNBVoltage;            //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usMinNBVoltage;            //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usBootUpNBVoltage;         //boot up NB voltage
+  UCHAR                      ucHtcTmpLmt;               //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+  UCHAR                      ucTjOffset;                //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+  ULONG                      ulReserved3[4];            //must be 0x0 for the reserved
+  ULONG                      ulDDISlot1Config;          //see above ulDDISlot1Config definition
+  ULONG                      ulDDISlot2Config;
+  ULONG                      ulDDISlot3Config;
+  ULONG                      ulDDISlot4Config;
+  ULONG                      ulReserved4[4];            //must be 0x0 for the reserved
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  USHORT                     usReserved;
+  ULONG                      ulReserved5[4];            //must be 0x0 for the reserved
+  ULONG                      ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+  ULONG                      ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+  ULONG                      ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+  ULONG                      ulReserved6[61];           //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;   
+
+#define ATOM_CRT_INT_ENCODER1_INDEX                       0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX                       0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX                        0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX                       0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX                       0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX                       0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX                        0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX                       0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX                        0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX                       0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX                       0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX                        0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX                       0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX                       0x0000000D
+
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID                          0x00
+#define ASIC_INT_TV_ENCODER_ID                            0x02
+#define ASIC_INT_DIG1_ENCODER_ID                          0x03
+#define ASIC_INT_DAC2_ENCODER_ID                          0x04
+#define ASIC_EXT_TV_ENCODER_ID                            0x06
+#define ASIC_INT_DVO_ENCODER_ID                           0x07
+#define ASIC_INT_DIG2_ENCODER_ID                          0x09
+#define ASIC_EXT_DIG_ENCODER_ID                           0x05
+#define ASIC_EXT_DIG2_ENCODER_ID                          0x08
+#define ASIC_INT_DIG3_ENCODER_ID                          0x0a
+#define ASIC_INT_DIG4_ENCODER_ID                          0x0b
+#define ASIC_INT_DIG5_ENCODER_ID                          0x0c
+#define ASIC_INT_DIG6_ENCODER_ID                          0x0d
+#define ASIC_INT_DIG7_ENCODER_ID                          0x0e
+
+// define Encoder attribute
+#define ATOM_ANALOG_ENCODER                               0
+#define ATOM_DIGITAL_ENCODER                              1
+#define ATOM_DP_ENCODER                                   2
+
+#define ATOM_ENCODER_ENUM_MASK                            0x70
+#define ATOM_ENCODER_ENUM_ID1                             0x00
+#define ATOM_ENCODER_ENUM_ID2                             0x10
+#define ATOM_ENCODER_ENUM_ID3                             0x20
+#define ATOM_ENCODER_ENUM_ID4                             0x30
+#define ATOM_ENCODER_ENUM_ID5                             0x40 
+#define ATOM_ENCODER_ENUM_ID6                             0x50
+
+#define ATOM_DEVICE_CRT1_INDEX                            0x00000000
+#define ATOM_DEVICE_LCD1_INDEX                            0x00000001
+#define ATOM_DEVICE_TV1_INDEX                             0x00000002
+#define ATOM_DEVICE_DFP1_INDEX                            0x00000003
+#define ATOM_DEVICE_CRT2_INDEX                            0x00000004
+#define ATOM_DEVICE_LCD2_INDEX                            0x00000005
+#define ATOM_DEVICE_DFP6_INDEX                            0x00000006
+#define ATOM_DEVICE_DFP2_INDEX                            0x00000007
+#define ATOM_DEVICE_CV_INDEX                              0x00000008
+#define ATOM_DEVICE_DFP3_INDEX                            0x00000009
+#define ATOM_DEVICE_DFP4_INDEX                            0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX                            0x0000000B
+
+#define ATOM_DEVICE_RESERVEDC_INDEX                       0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX                       0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX                       0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX                       0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO                    (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2                  ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3                  (ATOM_DEVICE_DFP5_INDEX + 1 )
+
+#define ATOM_MAX_SUPPORTED_DEVICE                         (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT                          (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT                          (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT                           (0x1L << ATOM_DEVICE_TV1_INDEX  )
+#define ATOM_DEVICE_DFP1_SUPPORT                          (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT                          (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT                          (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT                          (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT                          (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT                            (0x1L << ATOM_DEVICE_CV_INDEX   )
+#define ATOM_DEVICE_DFP3_SUPPORT                          (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT                          (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT                          (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT                           (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT                           (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT |  ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT                            (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT                           (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
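+
+// A minimal sketch (illustrative only): a boot-up display vector such as
+// ulBootUpReqDisplayVector (see the IntegratedSystemInfo comments above) is
+// simply tested against these masks. The helper name is hypothetical.
+static inline int atom_bootup_requests_lcd(ULONG ulBootUpReqDisplayVector)
+{
+  return (ulBootUpReqDisplayVector & ATOM_DEVICE_LCD_SUPPORT) != 0;
+}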
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK                   0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT                  0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA                         0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I                       0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D                       0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A                       0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO                      0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE                   0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS                        0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK                   0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART                       0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A                 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B                 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1                      0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT                 0x0000000F
+
+
+#define ATOM_DEVICE_DAC_INFO_MASK                         0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA                         0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB                         0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC                        0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C                          0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK                      0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT                     0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK                           0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT                          0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE              0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE                  0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE                0x00000003    //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL                 0x00000004    //For IGP RS690
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK                 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT                0x00000007
+#define	ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C            0x00000000
+#define	ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C            0x00000001
+
+//  usDeviceSupport:
+//  Bit 0  = 0 - no CRT1 support; = 1 - CRT1 is supported
+//  Bit 1  = 0 - no LCD1 support; = 1 - LCD1 is supported
+//  Bit 2  = 0 - no TV1  support; = 1 - TV1  is supported
+//  Bit 3  = 0 - no DFP1 support; = 1 - DFP1 is supported
+//  Bit 4  = 0 - no CRT2 support; = 1 - CRT2 is supported
+//  Bit 5  = 0 - no LCD2 support; = 1 - LCD2 is supported
+//  Bit 6  = 0 - no DFP6 support; = 1 - DFP6 is supported
+//  Bit 7  = 0 - no DFP2 support; = 1 - DFP2 is supported
+//  Bit 8  = 0 - no CV   support; = 1 - CV   is supported
+//  Bit 9  = 0 - no DFP3 support; = 1 - DFP3 is supported
+//  Bit 10 = 0 - no DFP4 support; = 1 - DFP4 is supported
+//  Bit 11 = 0 - no DFP5 support; = 1 - DFP5 is supported
+
+/****************************************************************************/
+/* Structure used in MclkSS_InfoTable                                       */
+/****************************************************************************/
+// ucI2C_ConfigID
+//   [7:0] - I2C LINE Associate ID
+//         = 0 - no I2C
+//   [7]   - HW_Cap       = 1, [6:0]=HW assisted I2C ID (HW line selection)
+//                        = 0, [6:0]=SW assisted I2C ID
+//   [6:4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+//                        = 2, HW engine for Multimedia use
+//                        = 3-7, Reserved for future I2C engines
+//   [3:0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfHW_Capable:1;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfI2C_LineMux:4;
+#else
+  UCHAR   bfI2C_LineMux:4;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+  ATOM_I2C_ID_CONFIG sbfAccess;
+  UCHAR              ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
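+
+// A minimal sketch (illustrative only) of decoding a raw ucI2C_ConfigID byte
+// through the union, per the bit layout documented above. The helper names
+// are hypothetical.
+static inline int atom_i2c_id_is_hw_assisted(ATOM_I2C_ID_CONFIG_ACCESS id)
+{
+  return id.sbfAccess.bfHW_Capable;   // [7]: 1 = HW engine, 0 = SW bit-bang
+}
+
+static inline UCHAR atom_i2c_id_line_mux(ATOM_I2C_ID_CONFIG_ACCESS id)
+{
+  return id.sbfAccess.bfI2C_LineMux;  // [3:0]: mux number (HW) or GPIO id (SW)
+}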
+   
+
+/****************************************************************************/	
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+  USHORT                    usClkMaskRegisterIndex;
+  USHORT                    usClkEnRegisterIndex;
+  USHORT                    usClkY_RegisterIndex;
+  USHORT                    usClkA_RegisterIndex;
+  USHORT                    usDataMaskRegisterIndex;
+  USHORT                    usDataEnRegisterIndex;
+  USHORT                    usDataY_RegisterIndex;
+  USHORT                    usDataA_RegisterIndex;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+  UCHAR                     ucClkMaskShift;
+  UCHAR                     ucClkEnShift;
+  UCHAR                     ucClkY_Shift;
+  UCHAR                     ucClkA_Shift;
+  UCHAR                     ucDataMaskShift;
+  UCHAR                     ucDataEnShift;
+  UCHAR                     ucDataY_Shift;
+  UCHAR                     ucDataA_Shift;
+  UCHAR                     ucReserved1;
+  UCHAR                     ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_GPIO_I2C_ASSIGMENT   asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
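+
+// A minimal sketch (illustrative only): each assignment record pairs a
+// register index with a bit shift for every clock/data control, and a
+// bit-banging implementation derives its read-modify-write masks from them.
+// The helper name is hypothetical.
+static inline ULONG atom_gpio_i2c_clk_en_mask(const ATOM_GPIO_I2C_ASSIGMENT *rec)
+{
+  // OR this mask into the register at usClkEnRegisterIndex to drive the
+  // clock line; clear it to tristate.
+  return 1UL << rec->ucClkEnShift;
+}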
+
+/****************************************************************************/	
+// Common Structure used in other structures
+/****************************************************************************/	
+
+#ifndef _H2INC
+  
+// Please don't add to or expand this bitfield structure below; it will be retired soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{ 
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:6;
+  USHORT RGB888:1;
+  USHORT DoubleClock:1;
+  USHORT Interlace:1;
+  USHORT CompositeSync:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT VerticalCutOff:1;
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HorizontalCutOff:1;
+#else
+  USHORT HorizontalCutOff:1;
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VerticalCutOff:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT CompositeSync:1;
+  USHORT Interlace:1;
+  USHORT DoubleClock:1;
+  USHORT RGB888:1;
+  USHORT Reserved:6;           
+#endif
+}ATOM_MODE_MISC_INFO;
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  ATOM_MODE_MISC_INFO sbfAccess;
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+  
+#else
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+   
+#endif
+
+// usModeMiscInfo-
+#define ATOM_H_CUTOFF           0x01
+#define ATOM_HSYNC_POLARITY     0x02             //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY     0x04             //0=Active High, 1=Active Low
+#define ATOM_V_CUTOFF           0x08
+#define ATOM_H_REPLICATIONBY2   0x10
+#define ATOM_V_REPLICATIONBY2   0x20
+#define ATOM_COMPOSITESYNC      0x40
+#define ATOM_INTERLACE          0x80
+#define ATOM_DOUBLE_CLOCK_MODE  0x100
+#define ATOM_RGB888_MODE        0x200
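+
+// A minimal sketch (illustrative only): the flags above occupy the same bit
+// positions as the ATOM_MODE_MISC_INFO bitfield, so whole-word tests and
+// bitfield reads are interchangeable. The helper name is hypothetical.
+static inline int atom_mode_is_interlaced(ATOM_MODE_MISC_INFO_ACCESS info)
+{
+  return (info.usAccess & ATOM_INTERLACE) != 0;
+}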
+
+//usRefreshRate-
+#define ATOM_REFRESH_43         43
+#define ATOM_REFRESH_47         47
+#define ATOM_REFRESH_56         56	
+#define ATOM_REFRESH_60         60
+#define ATOM_REFRESH_65         65
+#define ATOM_REFRESH_70         70
+#define ATOM_REFRESH_72         72
+#define ATOM_REFRESH_75         75
+#define ATOM_REFRESH_85         85
+
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// Translation from EDID to ATOM_MODE_TIMING uses the following formulas
+// (a sketch applying them follows the ATOM_MODE_TIMING definition below):
+//
+//   VESA_HTOTAL      = VESA_ACTIVE + 2 * VESA_BORDER + VESA_BLANK
+//                    = EDID_HA + EDID_HBL
+//   VESA_HDISP       = VESA_ACTIVE = EDID_HA
+//   VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+//                    = EDID_HA + EDID_HSO
+//   VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+//   VESA_BORDER      = EDID_BORDER
+
+/****************************************************************************/	
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+  USHORT  usH_Size;
+  USHORT  usH_Blanking_Time;
+  USHORT  usV_Size;
+  USHORT  usV_Blanking_Time;			
+  USHORT  usH_SyncOffset;
+  USHORT  usH_SyncWidth;
+  USHORT  usV_SyncOffset;
+  USHORT  usV_SyncWidth;
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;  
+  UCHAR   ucH_Border;         // From DFP EDID
+  UCHAR   ucV_Border;
+  UCHAR   ucCRTC;             // ATOM_CRTC1 or ATOM_CRTC2  
+  UCHAR   ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/	
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+  USHORT                      usH_Total;        // horizontal total
+  USHORT                      usH_Disp;         // horizontal display
+  USHORT                      usH_SyncStart;    // horizontal sync start
+  USHORT                      usH_SyncWidth;    // horizontal Sync width
+  USHORT                      usV_Total;        // vertical total
+  USHORT                      usV_Disp;         // vertical display
+  USHORT                      usV_SyncStart;    // vertical Sync start
+  USHORT                      usV_SyncWidth;    // vertical Sync width
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  UCHAR                       ucCRTC;           // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR                       ucOverscanRight;  // right
+  UCHAR                       ucOverscanLeft;   // left
+  UCHAR                       ucOverscanBottom; // bottom
+  UCHAR                       ucOverscanTop;    // top
+  UCHAR                       ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used in StandardVESA_TimingTable
+//                   AnalogTV_InfoTable 
+//                   ComponentVideoInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MODE_TIMING
+{
+  USHORT  usCRTC_H_Total;
+  USHORT  usCRTC_H_Disp;
+  USHORT  usCRTC_H_SyncStart;
+  USHORT  usCRTC_H_SyncWidth;
+  USHORT  usCRTC_V_Total;
+  USHORT  usCRTC_V_Disp;
+  USHORT  usCRTC_V_SyncStart;
+  USHORT  usCRTC_V_SyncWidth;
+  USHORT  usPixelClock;                          //in 10kHz units
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  USHORT  usCRTC_OverscanRight;
+  USHORT  usCRTC_OverscanLeft;
+  USHORT  usCRTC_OverscanBottom;
+  USHORT  usCRTC_OverscanTop;
+  USHORT  usReserve;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+  USHORT  usPixClk;
+  USHORT  usHActive;
+  USHORT  usHBlanking_Time;
+  USHORT  usVActive;
+  USHORT  usVBlanking_Time;			
+  USHORT  usHSyncOffset;
+  USHORT  usHSyncWidth;
+  USHORT  usVSyncOffset;
+  USHORT  usVSyncWidth;
+  USHORT  usImageHSize;
+  USHORT  usImageVSize;
+  UCHAR   ucHBorder;
+  UCHAR   ucVBorder;
+  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/	
+// Structure used in LVDS_InfoTable 
+//  * Need a document to describe this table
+/****************************************************************************/	
+#define SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usModePatchTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;
+  UCHAR               ucPanelInfoSize;           //  starts from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
+  UCHAR               ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read the EDID from the LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL.
+#define LCDPANEL_CAP_READ_EDID                  0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates @usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 on.
+#define LCDPANEL_CAP_DRR_SUPPORTED              0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP                        0x4
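+
+// Illustrative sketch (not part of the original header): testing the cap bits
+// above on a ucLCDPanel_SpecialHandlingCap value read from the table.
+#if 0   /* example only, never compiled */
+static int example_panel_is_edp(UCHAR ucCap)  { return (ucCap & LCDPANEL_CAP_eDP) != 0; }
+static int example_panel_has_drr(UCHAR ucCap) { return (ucCap & LCDPANEL_CAP_DRR_SUPPORTED) != 0; }
+#endif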
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK    0x70
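+
+// Illustrative sketch (not part of the original header): decoding the EDID 1.4
+// color bit depth code from bits [6:4], per the table above. Returns bits per
+// primary color, or 0 when the code is undefined/reserved.
+#if 0   /* example only, never compiled */
+static UCHAR example_panel_color_depth(UCHAR ucMisc)
+{
+  UCHAR code = (UCHAR)((ucMisc & PANEL_COLOR_BIT_DEPTH_MASK) >> 4);
+  return (code == 0 || code == 7) ? 0 : (UCHAR)(4 + 2 * code);   // 1->6bpc ... 6->16bpc
+}
+#endif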
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}   
+#define PANEL_RANDOM_DITHER   0x80
+#define PANEL_RANDOM_DITHER_MASK   0x80
+
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
+
+/****************************************************************************/	
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13 
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved 
+  UCHAR               ucPanelInfoSize;           //  starts from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  UCHAR               ucDPCD_eDP_CONFIGURATION_CAP;     // dpcd 0dh
+  UCHAR               ucDPCD_MAX_LINK_RATE;             // dpcd 01h
+  UCHAR               ucDPCD_MAX_LANE_COUNT;            // dpcd 02h
+  UCHAR               ucDPCD_MAX_DOWNSPREAD;            // dpcd 03h
+
+  USHORT              usMaxPclkFreqInSingleLink;        // Max PixelClock frequency in single link mode. 
+  UCHAR               uceDPToLVDSRxId;
+  UCHAR               ucLcdReservd;
+  ULONG               ulReserved[2];
+}ATOM_LCD_INFO_V13;  
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+ 
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read the EDID from the LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL.
+#define LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates @usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 on.
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP, no change compared to the previous version
+
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE                  0x00       // no eDP->LVDS translator chip 
+#define eDP_TO_LVDS_COMMON_ID                   0x01       // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID                       0x02       // RT translator, which requires AMD SW init
+
+typedef struct  _ATOM_PATCH_RECORD_MODE
+{
+  UCHAR     ucRecordType;
+  USHORT    usHDisp;
+  USHORT    usVDisp;
+}ATOM_PATCH_RECORD_MODE;
+
+typedef struct  _ATOM_LCD_RTS_RECORD
+{
+  UCHAR     ucRecordType;
+  UCHAR     ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
+
+//!! If the record below exists, it should always be the first record, for easy use in command tables!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct  _ATOM_LCD_MODE_CONTROL_CAP
+{
+  UCHAR     ucRecordType;
+  USHORT    usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF                   1
+#define LCD_MODE_CAP_CRTC_OFF                 2
+#define LCD_MODE_CAP_PANEL_OFF                4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+  UCHAR ucRecordType;
+  UCHAR ucFakeEDIDLength;
+  UCHAR ucFakeEDIDString[1];    // This actually has ucFakeEDIDLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct  _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+   UCHAR    ucRecordType;
+   USHORT   usHSize;
+   USHORT   usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE       1
+#define LCD_RTS_RECORD_TYPE                   2
+#define LCD_CAP_RECORD_TYPE                   3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE     6
+#define ATOM_RECORD_END_TYPE                  0xFF
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+  USHORT              usSpreadSpectrumPercentage; 
+  UCHAR               ucSpreadSpectrumType;     //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Bit2=1: PCIE REFCLK SS, =0: internal PPLL SS. Others: TBD
+  UCHAR               ucSS_Step;
+  UCHAR               ucSS_Delay;
+  UCHAR               ucSS_Id;
+  UCHAR               ucRecommendedRef_Div;
+  UCHAR               ucSS_Range;               //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY                      16
+#define ATOM_DP_SS_ID1                         0x0f1      // SS ID for internal DP stream at 2.7GHz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62GHz as well.
+#define ATOM_DP_SS_ID2                         0x0f2      // SS ID for internal DP stream at 1.62GHz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID              0x0f3      // SS ID for LV link translator chip at 2.7GHz
+#define ATOM_LVLINK_1620MHz_SS_ID              0x0f4      // SS ID for LV link translator chip at 1.62GHz
+
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+#define ATOM_INTERNAL_SS_MASK                  0x00000000
+#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT                2
+#define EXEC_SS_DELAY_SHIFT                    4    
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT         4
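+
+// Illustrative sketch (not part of the original header): classifying
+// ucSpreadSpectrumType with the masks above (bit0 = centre/down spread,
+// bit1 = external/internal source).
+#if 0   /* example only, never compiled */
+static int example_ss_is_centre_spread(UCHAR ucType)
+{
+  return (ucType & ATOM_SS_CENTRE_SPREAD_MODE_MASK) == ATOM_SS_CENTRE_SPREAD_MODE;
+}
+static int example_ss_is_external(UCHAR ucType)
+{
+  return (ucType & ATOM_EXTERNAL_SS_MASK) != 0;
+}
+#endif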
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_SPREAD_SPECTRUM_ASSIGNMENT   asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/	
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/	
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC                1
+//ATOM_TV_NTSCJ               2
+//ATOM_TV_PAL                 3
+//ATOM_TV_PALM                4
+//ATOM_TV_PALCN               5
+//ATOM_TV_PALN                6
+//ATOM_TV_PAL60               7
+//ATOM_TV_SECAM               8
+
+//ucTVSupportedStd definition:
+#define NTSC_SUPPORT          0x1
+#define NTSCJ_SUPPORT         0x2
+
+#define PAL_SUPPORT           0x4
+#define PALM_SUPPORT          0x8
+#define PALCN_SUPPORT         0x10
+#define PALN_SUPPORT          0x20
+#define PAL60_SUPPORT         0x40
+#define SECAM_SUPPORT         0x80
+
+#define MAX_SUPPORTED_TV_TIMING    2
+
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  /*ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+  ATOM_MODE_TIMING         aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
+
+#define MAX_SUPPORTED_TV_TIMING_V1_2    3
+
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+  UCHAR   ucRevisionNumber;        //10h : Revision 1.0; 11h : Revision 1.1   
+  UCHAR   ucMaxLinkRate;           //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+  UCHAR   ucMaxLane;               //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP 
+  UCHAR   ucMaxDownSpread;         //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK    0x1F
+
+/**************************************************************************/
+// VRAM usage and their definitions
+
+// One chunk of VRAM used by the BIOS is for HWICON surfaces, EDID data,
+// current mode timing and Detail Timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: the physical address of this memory portion = mmFB_START (4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR
+// To BIOS:   ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK        0x100       //256*64K=16MB (Max. VESA memory is 16MB!)
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE          256         //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE        4096        //In Bytes
+#define ATOM_HWICON_INFOTABLE_SIZE      32
+#define MAX_DTD_MODE_IN_VRAM            6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  32*8                         //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
+
+#define ATOM_HWICON1_SURFACE_ADDR       0
+#define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR      (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR             (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR     (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR	    (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
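+
+// Illustrative sketch (not part of the original header): computing the driver-
+// side physical address of a BIOS-reserved block, following the note above.
+// mm_fb_start and vram_usage_start are hypothetical inputs standing in for
+// mmFB_START and ATOMBIOS_VRAM_USAGE_START_ADDR.
+#if 0   /* example only, never compiled */
+static ULONG example_crt1_edid_phys_addr(ULONG mm_fb_start, ULONG vram_usage_start)
+{
+  /* physical = mmFB_START (4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR */
+  return mm_fb_start + vram_usage_start + ATOM_CRT1_EDID_ADDR;
+}
+#endif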
+
+#define ATOM_LCD1_EDID_ADDR             (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR     (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR   	(ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR      (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR             (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR     (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR	    (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR             (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR     (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR	    (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR             (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR     (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR   	(ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP6_EDID_ADDR             (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR     (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR     (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR             (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR     (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR     (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR               (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR       (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR       (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR             (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR     (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR     (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR             (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR     (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR     (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR             (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
+
+//The size below is in Kb!
+#define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+   
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
+#define	ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
+#define	ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
+#define	ATOM_VRAM_BLOCK_NEEDS_RESERVATION      0x0
+
+/***********************************************************************************/	
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+//        at run time.
+// Note2: From RV770, the memory is more than 32bit addressable, so we will change
+//        ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
+//        exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+//        (in offset to start of memory address) is KB aligned instead of byte aligned.
+/***********************************************************************************/	
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware != 0)
+    FBAccessAreaOffset = ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+    // The reserved area has been claimed by VBIOS, including this FB access area; CAIL doesn't need to reserve any extra area for this purpose.
+else // Non-VGA case
+    if (FB_Size <= 2Gb)
+        FBAccessAreaOffset = FB_Size - usFBUsedbyDrvInKB;
+    else
+        FBAccessAreaOffset = Aper_Size - usFBUsedbyDrvInKB;
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case. */
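+
+// Illustrative C sketch (not part of the original header) of the Note3 logic
+// above, assuming the V1.5 layout where usReserved becomes usFBUsedByDrvInKb.
+// All quantities are in KB; fb_size_kb and aper_size_kb are hypothetical inputs.
+#if 0   /* example only, never compiled */
+static ULONG example_fb_access_area_offset_kb(ULONG start_addr_used_by_fw_kb,
+                                              ULONG fb_used_by_drv_kb,
+                                              ULONG fb_size_kb, ULONG aper_size_kb)
+{
+  if (start_addr_used_by_fw_kb != 0)            /* VGA: VBIOS already reserved it */
+    return start_addr_used_by_fw_kb - fb_used_by_drv_kb;
+  else if (fb_size_kb <= 2UL * 1024 * 1024)     /* non-VGA, FB <= 2GB */
+    return fb_size_kb - fb_used_by_drv_kb;
+  else                                          /* non-VGA, FB > 2GB */
+    return aper_size_kb - fb_used_by_drv_kb;
+}
+#endif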
+
+/***********************************************************************************/	
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO			1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+// changed version to 1.5 to allow the driver to allocate the VRAM area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/	
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+  USHORT                   usGpioPin_AIndex;
+  UCHAR                    ucGpioPinBitShift;
+  UCHAR                    ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
+
+//ucGPIO_ID pre-defined ids for multiple usage
+//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, the AC/DC switching feature is enabled
+#define PP_AC_DC_SWITCH_GPIO_PINID          60
+//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTTable, the VRHot feature is enabled
+#define VDDC_VRHOT_GPIO_PINID               61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTTable, the Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID                 62
+
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+  ATOM_COMMON_TABLE_HEADER  sHeader;
+  ATOM_GPIO_PIN_ASSIGNMENT	asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/	
+// Structure used in ComponentVideoInfoTable	
+/****************************************************************************/	
+#define GPIO_PIN_ACTIVE_HIGH          0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS    5
+
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK  0x1F    // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK  0x60    // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK    0x80    // [7]
+
+typedef struct _ATOM_GPIO_INFO
+{
+  USHORT  usAOffset;
+  UCHAR   ucSettings;
+  UCHAR   ucReserved;
+}ATOM_GPIO_INFO;
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION           0x2
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN                   0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK              0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 outputs 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A       0x01     //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B       0x02     //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT   0x0   
+
+//Line 3 outputs 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04     //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08     //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2     
+
+//Line 3 outputs 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A        0x10     //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B        0x20     //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT    0x4 
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK              0x3F     // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST             0x80     //bit 7
+
+//GPIO bit index in the gpio setting per-mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3   //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4   //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT             usMask_PinRegisterIndex;
+  USHORT             usEN_PinRegisterIndex;
+  USHORT             usY_PinRegisterIndex;
+  USHORT             usA_PinRegisterIndex;
+  UCHAR              ucBitShift;
+  UCHAR              ucPinActiveState;  //ucPinActiveState: Bit0=1 active high, =0 active low
+  ATOM_DTD_FORMAT    sReserved;         // must be zeroed out
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucReserved[3];
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucReserved;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/	
+// Structure used in object_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_OBJECT_HEADER
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+  USHORT                    usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct  _ATOM_DISPLAY_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[1];                            //from the 1st encoder obj sourced from the GPU to the last graphic obj that connects to the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+  UCHAR                           ucNumOfDispPath;
+  UCHAR                           ucVersion;
+  UCHAR                           ucPadding[2];
+  ATOM_DISPLAY_OBJECT_PATH        asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT                                //each object has this structure    
+{
+  USHORT              usObjectID;
+  USHORT              usSrcDstTableOffset;
+  USHORT              usRecordOffset;                     //this points to a list of records defined below
+  USHORT              usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE                         //Each of the 4 object table offsets above points to a table of objects, all with this structure
+{
+  UCHAR               ucNumberOfObjects;
+  UCHAR               ucPadding[3];
+  ATOM_OBJECT         asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT         //usSrcDstTableOffset points to this structure
+{
+  UCHAR               ucNumberOfSrc;
+  USHORT              usSrcObjectID[1];
+  UCHAR               ucNumberOfDst;
+  USHORT              usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0                   0
+#define EXT_HPDPIN_LUTINDEX_1                   1
+#define EXT_HPDPIN_LUTINDEX_2                   2
+#define EXT_HPDPIN_LUTINDEX_3                   3
+#define EXT_HPDPIN_LUTINDEX_4                   4
+#define EXT_HPDPIN_LUTINDEX_5                   5
+#define EXT_HPDPIN_LUTINDEX_6                   6
+#define EXT_HPDPIN_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES   (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0                   0
+#define EXT_AUXDDC_LUTINDEX_1                   1
+#define EXT_AUXDDC_LUTINDEX_2                   2
+#define EXT_AUXDDC_LUTINDEX_3                   3
+#define EXT_AUXDDC_LUTINDEX_4                   4
+#define EXT_AUXDDC_LUTINDEX_5                   5
+#define EXT_AUXDDC_LUTINDEX_6                   6
+#define EXT_AUXDDC_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
+
+//ucChannelMapping is defined as follows
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
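+
+// Illustrative sketch (not part of the original header): extracting the four
+// per-lane source fields from a raw ucChannelMapping byte using the bit layout
+// documented above (shown for the little-endian field order).
+#if 0   /* example only, never compiled */
+static void example_decode_dp_channel_mapping(UCHAR ucChannelMapping)
+{
+  UCHAR lane0_src = (UCHAR)((ucChannelMapping >> 0) & 0x3);  // GPU TX pin feeding DP_Lane0
+  UCHAR lane1_src = (UCHAR)((ucChannelMapping >> 2) & 0x3);  // GPU TX pin feeding DP_Lane1
+  UCHAR lane2_src = (UCHAR)((ucChannelMapping >> 4) & 0x3);  // GPU TX pin feeding DP_Lane2
+  UCHAR lane3_src = (UCHAR)((ucChannelMapping >> 6) & 0x3);  // GPU TX pin feeding DP_Lane3
+  (void)lane0_src; (void)lane1_src; (void)lane2_src; (void)lane3_src;
+}
+#endif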
+
+//for DVI/HDMI, in the dual link case, both links have to have the same mapping.
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
+typedef struct _EXT_DISPLAY_PATH
+{
+  USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
+  USHORT  usDeviceACPIEnum;               //16bit device ACPI id. 
+  USHORT  usDeviceConnector;              //A physical connector for displays to plug in, using object connector definitions
+  UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
+  UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
+  USHORT  usExtEncoderObjId;              //external encoder object id
+  union{
+    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, using default one to one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucChPNInvert;                   // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
+  USHORT  usCaps;
+  USHORT  usReserved; 
+}EXT_DISPLAY_PATH;
+   
+#define NUMBER_OF_UCHAR_FOR_GUID          16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
+
+//usCaps
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE          0x01
+#define  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN        0x02
+
+typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
+  EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // a fixed total of 7 entries.
+  UCHAR                    ucChecksum;                            // a simple checksum: the byte sum of the whole structure equals 0x0.
+  UCHAR                    uc3DStereoPinId;                       // use for eDP panel
+  UCHAR                    ucRemoteDisplayConfig;
+  UCHAR                    uceDPToLVDSRxId;
+  UCHAR                    ucFixDPVoltageSwing;                   // usCaps[1]=1, this indicates the DP_LANE_SET value
+  UCHAR                    Reserved[3];                           // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+//Related definitions; all records are different but they have a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR               ucRecordType;                      //An enum indicating the record type
+  UCHAR               ucRecordSize;                      //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE                           1         
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define	ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE	     5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE               10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                 11
+#define	ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE	      12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE  13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE	      14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE	15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE          16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
+
+//Must be updated when a new record type is added; equal to that record's definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_BRACKET_LAYOUT_RECORD_TYPE
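+
+// Illustrative sketch (not part of the original header): one common way to
+// walk the record list reached through ATOM_OBJECT.usRecordOffset. Each record
+// begins with ATOM_COMMON_RECORD_HEADER and is advanced by ucRecordSize; the
+// termination test and the visit() callback are assumptions for demonstration.
+#if 0   /* example only, never compiled */
+static void example_walk_object_records(UCHAR *record_base,
+                                        void (*visit)(ATOM_COMMON_RECORD_HEADER *))
+{
+  ATOM_COMMON_RECORD_HEADER *rec = (ATOM_COMMON_RECORD_HEADER *)record_base;
+  while (rec->ucRecordType > 0 &&
+         rec->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER &&
+         rec->ucRecordSize >= sizeof(ATOM_COMMON_RECORD_HEADER)) {
+    visit(rec);
+    rec = (ATOM_COMMON_RECORD_HEADER *)((UCHAR *)rec + rec->ucRecordSize);
+  }
+}
+#endif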
+
+typedef struct  _ATOM_I2C_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          sucI2cId; 
+  UCHAR                       ucI2CAddr;              //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct  _ATOM_HPD_INT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDIntGPIOID;         //Corresponding block in GPIO_PIN_INFO table gives the pin info           
+  UCHAR                       ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct  _ATOM_OUTPUT_PROTECTION_RECORD 
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucProtectionFlag;
+  UCHAR                       ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG
+{
+  ULONG                       ulACPIDeviceEnum;       //Reserved for now
+  USHORT                      usDeviceID;             //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+  USHORT                      usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucNumberOfDevice;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[1];         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct  _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucConfigGPIOID;
+  UCHAR                       ucConfigGPIOState;      //Set to 1 when it's active high to enable external flow in
+  UCHAR                       ucFlowinGPIPID;
+  UCHAR                       ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct  _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucCTL1GPIO_ID;
+  UCHAR                       ucCTL1GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL2GPIO_ID;
+  UCHAR                       ucCTL2GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL3GPIO_ID;
+  UCHAR                       ucCTL3GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTLFPGA_IN_ID;
+  UCHAR                       ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucGPIOID;               //Corresponding block in GPIO_PIN_INFO table gives the pin info 
+  UCHAR                       ucTVActiveState;        //Indicates whether the pin is 0 or 1 when a TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct  _ATOM_JTAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucTMSGPIO_ID;
+  UCHAR                       ucTMSGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTCKGPIO_ID;
+  UCHAR                       ucTCKGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDOGPIO_ID;
+  UCHAR                       ucTDOGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDIGPIO_ID;
+  UCHAR                       ucTDIGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+  UCHAR                       ucGPIOID;               // GPIO_ID, find the corresponding ID in GPIO_LUT table
+  UCHAR                       ucGPIO_PinState;        // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucFlags;                // Future expandability
+  UCHAR                       ucNumberOfPins;         // Number of GPIO pins used to control the object
+  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[1];              // the actual gpio pin pairs; the count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state 
+#define GPIO_PIN_TYPE_INPUT             0x00
+#define GPIO_PIN_TYPE_OUTPUT            0x10
+#define GPIO_PIN_TYPE_HW_CONTROL        0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined 
+#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
+#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
+
+// Indexes to GPIO array in GLSync record 
+// GLSync record is for Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ  3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL  8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX       9
+
+typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ULONG                       ulStrengthControl;      // DVOA strength control for CF
+  UCHAR                       ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled 
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;         
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:14;        // Bit2-15 may be defined for other capabilities in the future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usReserved:14;        // Bit2-15 may be defined for other capabilities in the future
+#endif
+    };
+  }; 
+}ATOM_ENCODER_CAP_RECORD;                             
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
+
+typedef struct  _ATOM_CONNECTOR_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usMaxPixClk;
+  UCHAR                       ucFlowCntlGpioId;
+  UCHAR                       ucSwapCntlGpioId;
+  UCHAR                       ucConnectedDvoBundle;
+  UCHAR                       ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_DTD_FORMAT             asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;                //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+  UCHAR                       ucSubConnectorType;     //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+  UCHAR                       ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucMuxType;              //decides the number of ucMuxState, =0: no pin state, =1: single state with complement, >1: multiple states
+  UCHAR                       ucMuxControlPin;
+  UCHAR                       ucMuxState[2];          //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucMuxType;
+  UCHAR                       ucMuxControlPin;
+  UCHAR                       ucMuxState[2];          //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK                0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT   0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD     //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD  //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //A fixed-size array which maps external pins to internal DDC IDs
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usObjectID;         //could be a connector, encoder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_LAYOUT_INFO
+{
+   USHORT usConnectorObjectId;
+   UCHAR  ucConnectorType;
+   UCHAR  ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D                 1
+#define CONNECTOR_TYPE_DVI_I                 2
+#define CONNECTOR_TYPE_VGA                   3
+#define CONNECTOR_TYPE_HDMI                  4
+#define CONNECTOR_TYPE_DISPLAY_PORT          5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT     6
+
+typedef struct  _ATOM_BRACKET_LAYOUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucLength;
+  UCHAR                       ucWidth;
+  UCHAR                       ucConnNum;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
+/****************************************************************************/	
+// ASIC voltage data table
+/****************************************************************************/	
+typedef struct  _ATOM_VOLTAGE_INFO_HEADER
+{
+   USHORT   usVDDCBaseLevel;                //In units of 50mV
+   USHORT   usReserved;                     //For possible extension table offset
+   UCHAR    ucNumOfVoltageEntries;
+   UCHAR    ucBytesPerVoltageEntry;
+   UCHAR    ucVoltageStep;                  //Indicates how many mV one step increments, in 0.5mV units
+   UCHAR    ucDefaultVoltageEntry;
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct  _ATOM_VOLTAGE_INFO
+{
+   ATOM_COMMON_TABLE_HEADER sHeader;
+   ATOM_VOLTAGE_INFO_HEADER viHeader;
+   UCHAR    ucVoltageEntries[64];            //64 is for allocation; the actual size is ucNumOfVoltageEntries*ucBytesPerVoltageEntry bytes
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct  _ATOM_VOLTAGE_FORMULA
+{
+   USHORT   usVoltageBaseLevel;             // In units of 1mV
+   USHORT   usVoltageStep;                  // Indicates how many mV one step increments, in 1mV units
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+   UCHAR    ucFlag;                         // bit0=0: step is 1mV, =1: 0.5mV
+   UCHAR    ucBaseVID;                      // if there is no lookup table, VID = BaseVID + (Vol - BaseLevel)/VoltageStep
+   UCHAR    ucReserved;
+   UCHAR    ucVIDAdjustEntries[32];         // 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
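+
+// Illustrative sketch (not part of the original header): the no-lookup-table
+// VID formula from the ucBaseVID comment above. vol_mv is in mV; the 0.5mV
+// flag (bit0 of ucFlag) is ignored here for simplicity.
+#if 0   /* example only, never compiled */
+static UCHAR example_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT vol_mv)
+{
+  /* VID = BaseVID + (Vol - BaseLevel) / VoltageStep */
+  return (UCHAR)(f->ucBaseVID + (vol_mv - f->usVoltageBaseLevel) / f->usVoltageStep);
+}
+#endif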
+
+typedef struct  _VOLTAGE_LUT_ENTRY
+{
+   USHORT   usVoltageCode;                  // The Voltage ID, either a GPIO or I2C code
+   USHORT   usVoltageValue;                 // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct  _ATOM_VOLTAGE_FORMULA_V2
+{
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+   UCHAR    ucReserved[3];
+   VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+  UCHAR    ucVoltageControlId;              //Indicates whether it is controlled by I2C, GPIO or a HW state machine
+  UCHAR    ucVoltageControlI2cLine;
+  UCHAR    ucVoltageControlAddress;
+  UCHAR    ucVoltageControlOffset;
+  USHORT   usGpioPin_AIndex;                //GPIO_PAD register index
+  UCHAR    ucGpioPinBitShift[9];            //at most 8 pins support 255 VIDs, terminate with 0xff
+  UCHAR    ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define VOLTAGE_CONTROLLED_BY_HW              0x00
+#define VOLTAGE_CONTROLLED_BY_I2C_MASK        0x7F
+#define VOLTAGE_CONTROLLED_BY_GPIO            0x80
+#define VOLTAGE_CONTROL_ID_LM64               0x01      //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC                0x02      //I2C control, used for R5xx/R6xx MVDDC, MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM            0x03      //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402             0x04
+#define VOLTAGE_CONTROL_ID_UP6266             0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO            0x06
+#define VOLTAGE_CONTROL_ID_VT1556M            0x07
+#define VOLTAGE_CONTROL_ID_CHL822x            0x08
+#define VOLTAGE_CONTROL_ID_VT1586M            0x09
+#define VOLTAGE_CONTROL_ID_UP1637             0x0A
+#define VOLTAGE_CONTROL_ID_CHL8214            0x0B
+#define VOLTAGE_CONTROL_ID_UP1801             0x0C
+#define VOLTAGE_CONTROL_ID_ST6788A            0x0D
+#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2      0x0E
+#define VOLTAGE_CONTROL_ID_AD527x             0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022           0x10
+#define VOLTAGE_CONTROL_ID_LTC2635            0x11
+
+typedef struct  _ATOM_VOLTAGE_OBJECT
+{
+   UCHAR    ucVoltageType;                  //Indicates the Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the Object
+   ATOM_VOLTAGE_CONTROL      asControl;     //describes how to control
+   ATOM_VOLTAGE_FORMULA      asFormula;     //indicates how to convert a real Voltage to a VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_V2
+{
+   UCHAR    ucVoltageType;                  //Indicates the Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the Object
+   ATOM_VOLTAGE_CONTROL      asControl;     //describes how to control
+   ATOM_VOLTAGE_FORMULA_V2   asFormula;     //indicates how to convert a real Voltage to a VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ATOM_VOLTAGE_OBJECT        asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ATOM_VOLTAGE_OBJECT_V2     asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
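+
+// Illustration only: each ATOM_VOLTAGE_OBJECT is variable-sized (ucSize), so a
+// lookup has to walk the table byte-wise instead of indexing asVoltageObj[]
+// directly. A minimal sketch for the V1 table (V2 works the same way); the
+// helper name is hypothetical.
+static inline ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object(ATOM_VOLTAGE_OBJECT_INFO *info,
+                                                              UCHAR voltage_type)
+{
+   UCHAR *start = (UCHAR *)&info->asVoltageObj[0];
+   UCHAR *end = (UCHAR *)info + le16_to_cpu(info->sHeader.usStructureSize);
+
+   while (start < end) {
+      ATOM_VOLTAGE_OBJECT *vobj = (ATOM_VOLTAGE_OBJECT *)start;
+
+      if (vobj->ucVoltageType == voltage_type)
+         return vobj;
+      start += vobj->ucSize;   // advance by the object's own size
+   }
+   return NULL;   // no matching voltage source in this table
+}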
+
+typedef struct  _ATOM_LEAKID_VOLTAGE
+{
+	UCHAR		ucLeakageId;
+	UCHAR		ucReserved;
+	USHORT	usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+   UCHAR    ucVoltageType;                   //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucVoltageMode;                   //Indicates the voltage control mode: Init/Set/Leakage/Set phase
+   USHORT   usSize;                          //Size of the object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
+#define VOLTAGE_OBJ_GPIO_LUT                 0        //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ          3        //VOLTAGE REGULATOR INIT sequence through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_PHASE_LUT                4        //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_SVID2                    7        //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV                      8 
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT  0x12     //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+
+typedef struct  _VOLTAGE_LUT_ENTRY_V2
+{
+	 ULONG		ulVoltageId;									  // The Voltage ID which is used to program GPIO register
+	 USHORT		usVoltageValue;									// The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT  usVoltageLevel;                 // The voltage level, in mV
+  USHORT  usVoltageId;                    // The voltage ID
+  USHORT  usLeakageId;                    // The corresponding leakage ID
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
+   UCHAR    ucVoltageRegulatorId;          //Indicates the voltage regulator id
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+   ULONG    ulReserved;
+   VOLTAGE_LUT_ENTRY asVolI2cLut[1];       // terminated with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
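+
+// Illustration only: asVolI2cLut[] above is terminated by a sentinel rather than
+// carrying a count. A minimal sketch of scanning it for a voltage code, assuming,
+// per the comment above, that the list ends when usVoltageCode reads 0xff; the
+// helper name is hypothetical.
+static inline USHORT atom_i2c_lut_find(const ATOM_I2C_VOLTAGE_OBJECT_V3 *obj, USHORT code)
+{
+   const VOLTAGE_LUT_ENTRY *lut = obj->asVolI2cLut;
+
+   while (le16_to_cpu(lut->usVoltageCode) != 0xff) {
+      if (le16_to_cpu(lut->usVoltageCode) == code)
+         return le16_to_cpu(lut->usVoltageValue);   // voltage in mV
+      lut++;
+   }
+   return 0;   // sentinel reached without a match
+}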
+
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE                0
+#define VOLTAGE_DATA_TWO_BYTE                1
+
+typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
+   UCHAR    ucVoltageGpioCntlId;         // default is 0, which indicates control through CG VID mode
+   UCHAR    ucGpioEntryNum;              // indicates the entry number of the Voltage/GPIO value lookup table
+   UCHAR    ucPhaseDelay;                // phase delay in units of microseconds
+   UCHAR    ucReserved;
+   ULONG    ulGpioMaskVal;               // GPIO mask value
+   VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = 0x10/0x11/0x12
+   UCHAR    ucLeakageCntlId;             // default is 0
+   UCHAR    ucLeakageEntryNum;           // indicates the entry number of the LeakageId/Voltage LUT
+   UCHAR    ucReserved[2];
+   ULONG    ulMaxVoltageLevel;
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+
+typedef struct  _ATOM_SVID2_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_SVID2
+// bits 14:7 = PSI0_VID
+// bit  6    = PSI0_EN
+// bit  5    = PSI1
+// bits 4:2  = load line slope trim
+// bits 1:0  = offset trim
+   USHORT   usLoadLine_PSI;
+// GPU GPIO pin id of the SVID2 regulator VRHot pin. Possible values 0~31: 0 means GPIO0, 31 means GPIO31
+   UCHAR    ucSVDGpioId;     // 0~31 indicate GPIO0~31
+   UCHAR    ucSVCGpioId;     // 0~31 indicate GPIO0~31
+   ULONG    ulReserved;
+}ATOM_SVID2_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+  ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ATOM_VOLTAGE_OBJECT_V3     asVoltageObj[3];   //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
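+
+// Illustration only: V3 objects are again variable-sized (usSize in the common
+// header), and a caller matches on both voltage type and voltage mode before
+// picking the corresponding union member. A minimal sketch with a hypothetical
+// helper name:
+static inline ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info,
+                                                                    UCHAR voltage_type, UCHAR voltage_mode)
+{
+   UCHAR *start = (UCHAR *)&info->asVoltageObj[0];
+   UCHAR *end = (UCHAR *)info + le16_to_cpu(info->sHeader.usStructureSize);
+
+   while (start < end) {
+      ATOM_VOLTAGE_OBJECT_HEADER_V3 *hdr = (ATOM_VOLTAGE_OBJECT_HEADER_V3 *)start;
+
+      if (hdr->ucVoltageType == voltage_type && hdr->ucVoltageMode == voltage_mode)
+         return (ATOM_VOLTAGE_OBJECT_V3 *)hdr;   // e.g. VOLTAGE_OBJ_GPIO_LUT -> asGpioVoltageObj
+      start += le16_to_cpu(hdr->usSize);
+   }
+   return NULL;
+}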
+
+typedef struct  _ATOM_ASIC_PROFILE_VOLTAGE
+{
+	UCHAR		ucProfileId;
+	UCHAR		ucReserved;
+	USHORT	usSize;
+	USHORT	usEfuseSpareStartAddr;
+  USHORT  usFuseIndex[8];                       // from LSB to MSB, max 8 bits; terminated with 0xffff if there are fewer than 8 efuse ids
+  ATOM_LEAKID_VOLTAGE  asLeakVol[2];            // Leakage id and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE			1		
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE			1
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE					2
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO
+{
+  ATOM_COMMON_TABLE_HEADER			asHeader; 
+	ATOM_ASIC_PROFILE_VOLTAGE			asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER			asHeader; 
+  UCHAR  ucLeakageBinNum;                // indicate the entry number of LeakageId/Voltage Lut table
+  USHORT usLeakageBinArrayOffset;        // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher) 
+
+  UCHAR  ucElbVDDC_Num;               
+  USHORT usElbVDDC_IdArrayOffset;        // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
+  USHORT usElbVDDC_LevelArrayOffset;     // offset of 2 dimension voltage level USHORT array
+
+  UCHAR  ucElbVDDCI_Num;
+  USHORT usElbVDDCI_IdArrayOffset;       // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
+  USHORT usElbVDDCI_LevelArrayOffset;    // offset of 2 dimension voltage level USHORT array
+}ATOM_ASIC_PROFILING_INFO_V2_1;
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader; 
+  ULONG  ulEvvDerateTdp;
+  ULONG  ulEvvDerateTdc;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  ULONG  ulCACmEncodeRange;
+  ULONG  ulCACmEncodeAverage;
+  ULONG  ulCACbEncodeRange;
+  ULONG  ulCACbEncodeAverage;
+  ULONG  ulKt_bEncodeRange;
+  ULONG  ulKt_bEncodeAverage;
+  ULONG  ulKv_mEncodeRange;
+  ULONG  ulKv_mEncodeAverage;
+  ULONG  ulKv_bEncodeRange;
+  ULONG  ulKv_bEncodeAverage;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usCurrentDpm0;
+  USHORT usPowerDpm1;
+  USHORT usCurrentDpm1;
+  USHORT usPowerDpm2;
+  USHORT usCurrentDpm2;
+  USHORT usPowerDpm3;
+  USHORT usCurrentDpm3;
+  USHORT usPowerDpm4;
+  USHORT usCurrentDpm4;
+  USHORT usPowerDpm5;
+  USHORT usCurrentDpm5;
+  USHORT usPowerDpm6;
+  USHORT usCurrentDpm6;
+  USHORT usPowerDpm7;
+  USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+  UCHAR   ucPwrSrcId;                       // Power source
+  UCHAR   ucPwrSensorType;                  // GPIO, I2C or none
+  UCHAR   ucPwrSensId;                      // GPIO id for GPIO detection, I2C id for I2C detection
+  UCHAR   ucPwrSensSlaveAddr;               // Slave address for I2C detection
+  UCHAR   ucPwrSensRegIndex;                // I2C register index for I2C detection
+  UCHAR   ucPwrSensRegBitMask;              // which bit to test for I2C detection
+  UCHAR   ucPwrSensActiveState;             // high active or low active
+  UCHAR   ucReserve[3];                     // reserved
+  USHORT  usSensPwr;                        // in units of watts
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+		ATOM_COMMON_TABLE_HEADER		asHeader;
+		UCHAR												asPwrbehave[16];
+		ATOM_POWER_SOURCE_OBJECT		asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1						0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1	0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1	0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2	0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2	0x08
+
+//define ucPwrSensorId
+#define POWER_SENSOR_ALWAYS							0x00
+#define POWER_SENSOR_GPIO								0x01
+#define POWER_SENSOR_I2C								0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;          
+  ULONG  ulBootUpUMAClock;          
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;   
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;           
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;              
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
+  ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
+  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15]; 
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;   
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+//ucLVDSMisc:                   
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE                                             0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                                                0x02
+#define SYS_INFO_LVDSMISC__888_BPC                                                   0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                                               0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW                                           0x10
+// new since Trinity
+#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN                               0x20
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW                                          0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW                                          0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency, in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following are supported devices in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               Other display related flags, not defined yet.
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using the VBIOS function or enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use;
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it is platform dependent,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold off value to exit the HTC_active state, which is the threshold on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLKs and corresponding voltages, ordered from low to high.
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates, used to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not including eDP ) Spread Spectrum Percentage in units of 0.01%; =0: use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not including eDP ) Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; =0 means VBIOS uses the default threshold, which is currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bits per color =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+**********************************************************************************************************************/
+
+// this Table is used for Llano/Ontario APUs
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6    sIntegratedSysInfo;   
+  ULONG  ulPowerplayTable[128];  
+}ATOM_FUSION_SYSTEM_INFO_V1; 
+
+
+typedef struct _ATOM_TDP_CONFIG_BITS
+{
+#if ATOM_BIG_ENDIAN
+  ULONG   uReserved:2;
+  ULONG   uTDP_Value:14;  // Original TDP value in tens of milliwatts
+  ULONG   uCTDP_Value:14; // Override value in tens of milliwatts
+  ULONG   uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value ? 2 : (uCTDP_Value < uTDP_Value))
+#else
+  ULONG   uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value ? 2 : (uCTDP_Value < uTDP_Value))
+  ULONG   uCTDP_Value:14; // Override value in tens of milliwatts
+  ULONG   uTDP_Value:14;  // Original TDP value in tens of milliwatts
+  ULONG   uReserved:2;
+#endif
+}ATOM_TDP_CONFIG_BITS;
+
+typedef union _ATOM_TDP_CONFIG
+{
+  ATOM_TDP_CONFIG_BITS TDP_config;
+  ULONG            TDP_config_all;
+}ATOM_TDP_CONFIG;
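+
+// Illustration only: a minimal sketch of unpacking a raw configuration dword
+// through the union above. The helper name and the message are hypothetical.
+static inline void atom_report_tdp_config(ULONG raw)
+{
+   ATOM_TDP_CONFIG cfg;
+
+   cfg.TDP_config_all = raw;
+   // uCTDP_Enable: 0 = no override, 1 = cTDP below TDP, 2 = cTDP above TDP
+   if (cfg.TDP_config.uCTDP_Enable)
+      printk(KERN_INFO "cTDP override %u mW (TDP %u mW)\n",
+             (unsigned int)cfg.TDP_config.uCTDP_Value * 10,
+             (unsigned int)cfg.TDP_config.uTDP_Value * 10);
+}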
+
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:               refer to the ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]:            These 512 bytes of memory are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0].
+**********************************************************************************************************************/ 
+
+// this IntegratedSystemInfoTable is used for Trinity APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;               
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE                0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                               0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT                         0x08
+#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS                               0x10
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency, in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if an LCD panel is connected and the LID is open)
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
+                                        =1: DP mode use single PLL mode
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                      
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using the VBIOS function or enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use;
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it is platform dependent,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold off value to exit the HTC_active state, which is the threshold on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usNBP2Voltage:                    VID for voltage on NB P2 State
+usNBP3Voltage:                    VID for voltage on NB P3 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLKs and corresponding voltages, ordered from low to high.
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates, used to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not including eDP ) Spread Spectrum Percentage in units of 0.01%; =0: use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not including eDP ) Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; =0 means VBIOS uses the default threshold, which is currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bits per color =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+                                  [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
+ucTravisLVDSVolAdjust             When ucLVDSMisc[5]=1, it means the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
+                                  value to program the Travis register LVDS_CTRL_4
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:    LVDS power up sequence time in units of 4ms; time delay from DIGON signal active to data enable signal active( DE ).
+                                  =0 means use the VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms:  LVDS power up sequence time in units of 4ms; time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
+                                  =0 means use the VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: LVDS power down sequence time in units of 4ms; time delay from vary brightness enable signal( VARY_BL ) off to data enable ( DE ) signal off.
+                                  =0 means use the VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqDEtoDIGON_in4Ms:   LVDS power down sequence time in units of 4ms; time delay from data enable ( DE ) signal off to LCDVCC ( DIGON ) off.
+                                  =0 means use the VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms:         LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active. 
+                                  =0 means to use VBIOS default delay which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
+                                  LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:  
+                                  LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. 
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucMinAllowedBL_Level:             Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency in units of 10kHz in different NB pstates.
+
+**********************************************************************************************************************/
+
+// this IntegratedSystemInfoTable is used for Kaveri & Kabini APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulVBIOSMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulDISP_CLK2Freq;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulReserved2;
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;
+  ULONG  ulReserved3;
+  USHORT usGPUReservedSysMemSize;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulReserved4;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulGPUReservedSysMemBaseAddrLo;
+  ULONG  ulGPUReservedSysMemBaseAddrHi;
+  ULONG  ulReserved5[3];
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  ULONG  ulReserved6;               
+  ULONG  ulNbpStateNClkFreq[4];
+  USHORT usNBPStateVoltage[4];            
+  USHORT usBootUpNBVoltage;   
+  USHORT usReserved2; 
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency, in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following are supported devices in Kaveri/Kabini projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+
+ulVBIOSMisc:                      Miscellaneous flags for VBIOS requirements and interface
+                                  bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS. 
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if an LCD panel is connected and the LID is open)
+
+ulGPUCapInfo:                     bit[0~2]= Reserved
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                  bit[4]=0: Disable DFS bypass feature
+                                        =1: Enable DFS bypass feature
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using the VBIOS function or enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use;
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it is platform dependent,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter the HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold off value to exit the HTC_active state, which is the threshold on value minus ucHtcHystLmt.
+
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled. 
+                                  Bit[3]=0: GNB DPM is disabled
+                                        =1: GNB DPM is enabled                                
+ulCPUCapInfo:                     TBD
+
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+
+strVBIOSMsg[40]:                  VBIOS boot up customized message string 
+
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLKs and corresponding voltages, ordered from low to high.
+
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not including eDP ) Spread Spectrum Percentage in units of 0.01%; =0: use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not including eDP ) Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0: use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0: use VBIOS default setting.
+
+usGPUReservedSysMemSize:          Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
+ulGPUReservedSysMemBaseAddrLo:    Low 32 bits base address to the reserved system memory. 
+ulGPUReservedSysMemBaseAddrHi:    High 32 bits base address to the reserved system memory. 
+
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; =0 means VBIOS uses the default threshold, which is currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bits per color =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+                                  [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
+ucTravisLVDSVolAdjust             When ucLVDSMisc[5]=1, it means the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
+                                  value to program the Travis register LVDS_CTRL_4
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
+                                  LVDS power up sequence time in units of 4ms; time delay from DIGON signal active to data enable signal active( DE ).
+                                  =0 means use the VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms:
+                                  LVDS power up sequence time in units of 4ms; time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
+                                  =0 means use the VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms:
+                                  LVDS power down sequence time in units of 4ms; time delay from vary brightness enable signal( VARY_BL ) off to data enable ( DE ) signal off.
+                                  =0 means use the VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOffSeqDEtoDIGON_in4Ms:
+                                  LVDS power down sequence time in units of 4ms; time delay from data enable ( DE ) signal off to LCDVCC ( DIGON ) off.
+                                  =0 means use the VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSOffToOnDelay_in4Ms:         
+                                  LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active. 
+                                  =0 means to use VBIOS default delay which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
+                                  LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:  
+                                  LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. 
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucMinAllowedBL_Level:             Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
+
+ulLCDBitDepthControlVal:          GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency in units of 10kHz in different NB P-States(P0, P1, P2 & P3).
+ulNbpStateNClkFreq[4]:            NB P-State NClk frequency in different NB P-States
+usNBPStateVoltage[4]:             NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage
+usBootUpNBVoltage:                NB P-State voltage during boot up before driver loaded 
+sExtDispConnInfo:                 Display connector information table provided to VBIOS
+
+**********************************************************************************************************************/
+
+// this Table is used for Kaveri/Kabini APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8    sIntegratedSysInfo;       // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
+  ULONG                               ulPowerplayTable[128];    // 512 bytes used to save the powerplay table, starting from ulPowerplayTable[0]
+}ATOM_FUSION_SYSTEM_INFO_V2; 
+
+
+/**************************************************************************/
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
+#define ICS91719  1
+#define ICS91720  2
+
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+  UCHAR         ucNunberOfBytes;                                              //Indicates how many bytes SW needs to write to the external ASIC for one block, in addition to "Start" and "Stop"
+  UCHAR         ucI2CData[1];                                                 //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+  ATOM_I2C_ID_CONFIG_ACCESS       sucI2cId;               //I2C line and HW/SW assisted cap.
+  UCHAR		                        ucSSChipID;             //SS chip being used
+  UCHAR		                        ucSSChipSlaveAddr;      //Slave Address to set up this SS chip
+  UCHAR                           ucNumOfI2CDataRecords;  //number of data blocks
+  ATOM_I2C_DATA_RECORD            asI2CData[1];  
+}ATOM_I2C_DEVICE_SETUP_INFO;
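+
+// Illustration only: each data record above carries a counted, variable-length
+// payload, so a consumer must step through asI2CData[] by each record's byte
+// count. A minimal sketch with a hypothetical write callback:
+static inline void atom_walk_i2c_records(const ATOM_I2C_DEVICE_SETUP_INFO *setup,
+                                         void (*write_block)(const UCHAR *buf, UCHAR len))
+{
+   const ATOM_I2C_DATA_RECORD *rec = setup->asI2CData;
+   UCHAR i;
+
+   for (i = 0; i < setup->ucNumOfI2CDataRecords; i++) {
+      write_block(rec->ucI2CData, rec->ucNunberOfBytes);   // one block between Start and Stop
+      // advance past this record: one length byte plus the payload
+      rec = (const ATOM_I2C_DATA_RECORD *)((const UCHAR *)rec + 1 + rec->ucNunberOfBytes);
+   }
+}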
+
+//==========================================================================================
+typedef struct  _ATOM_ASIC_MVDD_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
+#define ATOM_MCLK_SS_INFO         ATOM_ASIC_MVDD_INFO
+
+//==========================================================================================
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+  ULONG               ulTargetClockRange;         //Clock Out frequency (VCO), in unit of 10kHz
+  USHORT              usSpreadSpectrumPercentage; //in unit of 0.01%
+  USHORT              usSpreadRateInKhz;          //in unit of kHz, modulation freq
+  UCHAR               ucClockIndication;          //Indicate which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;       //Bit0=0 Down Spread, =1 Center Spread.
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication. SW uses the IDs below to determine whether SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS	         1
+#define ASIC_INTERNAL_ENGINE_SS	         2
+#define ASIC_INTERNAL_UVD_SS             3
+#define ASIC_INTERNAL_SS_ON_TMDS         4
+#define ASIC_INTERNAL_SS_ON_HDMI         5
+#define ASIC_INTERNAL_SS_ON_LVDS         6
+#define ASIC_INTERNAL_SS_ON_DP           7
+#define ASIC_INTERNAL_SS_ON_DCPLL        8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK     9
+#define ASIC_INTERNAL_VCE_SS             10
+#define ASIC_INTERNAL_GPUPLL_SS          11
+
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+  ULONG               ulTargetClockRange;         //For mem/engine/uvd, Clock Out frequency (VCO), in unit of 10kHz
+                                                  //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock (27000 or 16200)
+  USHORT              usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
+  USHORT              usSpreadRateIn10Hz;         //in unit of 10Hz, modulation freq
+  UCHAR               ucClockIndication;          //Indicate which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;       //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, bit1=1: external SS
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+//#define ATOM_INTERNAL_SS_MASK                  0x00000000
+//#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT		      asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V2      asSpreadSpectrum[1];      //this is a pointer only. 
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+  ULONG               ulTargetClockRange;         //For mem/engine/uvd, Clock Out frequency (VCO), in unit of 10kHz
+                                                  //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock (27000 or 16200)
+  USHORT              usSpreadSpectrumPercentage; //in unit of 0.01%
+  USHORT              usSpreadRateIn10Hz;         //in unit of 10Hz, modulation freq
+  UCHAR               ucClockIndication;          //Indicate which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;       //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, bit1=1: external SS
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
+#define SS_MODE_V3_CENTRE_SPREAD_MASK             0x01
+#define SS_MODE_V3_EXTERNAL_SS_MASK               0x02
+#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK    0x10
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V3      asSpreadSpectrum[1];      //this is a pointer only. 
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
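+// Illustrative sketch (assumption based on how these variable-length tables
+// are typically consumed): the entry count is bounded by the header's
+// usStructureSize, and SS is treated as disabled when no ucClockIndication
+// matches:
+//
+//   const ATOM_ASIC_INTERNAL_SS_INFO_V3 *tbl;         /* hypothetical pointer */
+//   int i, n = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+//               sizeof(ATOM_COMMON_TABLE_HEADER)) /
+//              sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+//   for (i = 0; i < n; i++)
+//       if (tbl->asSpreadSpectrum[i].ucClockIndication == ASIC_INTERNAL_ENGINE_SS)
+//           break;                                    /* found the engine-clock SS entry */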
+
+
+//==============================Scratch Pad Definition Portion===============================
+#define ATOM_DEVICE_CONNECT_INFO_DEF  0
+#define ATOM_ROM_LOCATION_DEF         1
+#define ATOM_TV_STANDARD_DEF          2
+#define ATOM_ACTIVE_INFO_DEF          3
+#define ATOM_LCD_INFO_DEF             4
+#define ATOM_DOS_REQ_INFO_DEF         5
+#define ATOM_ACC_CHANGE_INFO_DEF      6
+#define ATOM_DOS_MODE_INFO_DEF        7
+#define ATOM_I2C_CHANNEL_STATUS_DEF   8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF  9
+#define ATOM_INTERNAL_TIMER_DEF       10
+
+// BIOS_0_SCRATCH Definition 
+#define ATOM_S0_CRT1_MONO               0x00000001L
+#define ATOM_S0_CRT1_COLOR              0x00000002L
+#define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A         0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A            0x00000008L
+#define ATOM_S0_TV1_MASK_A              (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A                    0x00000010L
+#define ATOM_S0_CV_DIN_A                0x00000020L
+#define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+
+#define ATOM_S0_CRT2_MONO               0x00000100L
+#define ATOM_S0_CRT2_COLOR              0x00000200L
+#define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE           0x00000400L
+#define ATOM_S0_TV1_SVIDEO              0x00000800L
+#define ATOM_S0_TV1_SCART               0x00004000L
+#define ATOM_S0_TV1_MASK                (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV                      0x00001000L
+#define ATOM_S0_CV_DIN                  0x00002000L
+#define ATOM_S0_CV_MASK                 (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1                    0x00010000L
+#define ATOM_S0_DFP2                    0x00020000L
+#define ATOM_S0_LCD1                    0x00040000L
+#define ATOM_S0_LCD2                    0x00080000L
+#define ATOM_S0_DFP6                    0x00100000L
+#define ATOM_S0_DFP3                    0x00200000L
+#define ATOM_S0_DFP4                    0x00400000L
+#define ATOM_S0_DFP5                    0x00800000L
+
+#define ATOM_S0_DFP_MASK                (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
+
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running a PCIE asic with 
+                                                    // the FAD/HDP reg access bug.  Bit is read by DAL, this is obsolete from RV5xx
+
+#define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT     26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
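+// Illustrative sketch: decoding the thermal and system power state fields
+// from a BIOS_0_SCRATCH value with the masks/shifts above (s0 is a
+// hypothetical register readback):
+//
+//   ULONG s0;                                         /* read from BIOS_0_SCRATCH */
+//   ULONG therm = (s0 & ATOM_S0_THERMAL_STATE_MASK) >> ATOM_S0_THERMAL_STATE_SHIFT;
+//   ULONG power = (s0 & ATOM_S0_SYSTEM_POWER_STATE_MASK) >> ATOM_S0_SYSTEM_POWER_STATE_SHIFT;
+//   if (power == ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC)
+//       /* system is on DC (battery) power */;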
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S0_CRT1_MONOb0             0x01
+#define ATOM_S0_CRT1_COLORb0            0x02
+#define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0         0x04
+#define ATOM_S0_TV1_SVIDEOb0            0x08
+#define ATOM_S0_TV1_MASKb0              (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0                    0x10
+#define ATOM_S0_CV_DINb0                0x20
+#define ATOM_S0_CV_MASKb0               (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1             0x01
+#define ATOM_S0_CRT2_COLORb1            0x02
+#define ATOM_S0_CRT2_MASKb1             (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1         0x04
+#define ATOM_S0_TV1_SVIDEOb1            0x08
+#define ATOM_S0_TV1_SCARTb1             0x40
+#define ATOM_S0_TV1_MASKb1              (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1                    0x10
+#define ATOM_S0_CV_DINb1                0x20
+#define ATOM_S0_CV_MASKb1               (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2                  0x01
+#define ATOM_S0_DFP2b2                  0x02
+#define ATOM_S0_LCD1b2                  0x04
+#define ATOM_S0_LCD2b2                  0x08
+#define ATOM_S0_DFP6b2                  0x10
+#define ATOM_S0_DFP3b2                  0x20
+#define ATOM_S0_DFP4b2                  0x40
+#define ATOM_S0_DFP5b2                  0x80
+
+
+#define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3   2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT              18
+
+// BIOS_1_SCRATCH Definition
+#define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
+
+//	BIOS_2_SCRATCH Definition
+#define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
+
+#define ATOM_S2_DEVICE_DPMS_STATE       0x00010000L
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE    0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE   0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE   0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_DEVICE_DPMS_STATEb2     0x01
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3     0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3   0x10
+#define ATOM_S2_TMDS_COHERENT_MODEb3    0x10          // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
+
+
+// BIOS_3_SCRATCH Definition
+#define ATOM_S3_CRT1_ACTIVE             0x00000001L
+#define ATOM_S3_LCD1_ACTIVE             0x00000002L
+#define ATOM_S3_TV1_ACTIVE              0x00000004L
+#define ATOM_S3_DFP1_ACTIVE             0x00000008L
+#define ATOM_S3_CRT2_ACTIVE             0x00000010L
+#define ATOM_S3_LCD2_ACTIVE             0x00000020L
+#define ATOM_S3_DFP6_ACTIVE             0x00000040L
+#define ATOM_S3_DFP2_ACTIVE             0x00000080L
+#define ATOM_S3_CV_ACTIVE               0x00000100L
+#define ATOM_S3_DFP3_ACTIVE							0x00000200L
+#define ATOM_S3_DFP4_ACTIVE							0x00000400L
+#define ATOM_S3_DFP5_ACTIVE							0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE        0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE        0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE         0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE				0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE				0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE				0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+//The two definitions below are not supported in pplib, only in the old powerplay in DAL
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S3_CRT1_ACTIVEb0           0x01
+#define ATOM_S3_LCD1_ACTIVEb0           0x02
+#define ATOM_S3_TV1_ACTIVEb0            0x04
+#define ATOM_S3_DFP1_ACTIVEb0           0x08
+#define ATOM_S3_CRT2_ACTIVEb0           0x10
+#define ATOM_S3_LCD2_ACTIVEb0           0x20
+#define ATOM_S3_DFP6_ACTIVEb0           0x40
+#define ATOM_S3_DFP2_ACTIVEb0           0x80
+#define ATOM_S3_CV_ACTIVEb1             0x01
+#define ATOM_S3_DFP3_ACTIVEb1						0x02
+#define ATOM_S3_DFP4_ACTIVEb1						0x04
+#define ATOM_S3_DFP5_ACTIVEb1						0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2      0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2      0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2       0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3			0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3			0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3			0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
+
+// BIOS_4_SCRATCH Definition
+#define ATOM_S4_LCD1_PANEL_ID_MASK      0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT      8
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0	  0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1		  ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
+#define ATOM_S5_DOS_REQ_CRT1b0          0x01
+#define ATOM_S5_DOS_REQ_LCD1b0          0x02
+#define ATOM_S5_DOS_REQ_TV1b0           0x04
+#define ATOM_S5_DOS_REQ_DFP1b0          0x08
+#define ATOM_S5_DOS_REQ_CRT2b0          0x10
+#define ATOM_S5_DOS_REQ_LCD2b0          0x20
+#define ATOM_S5_DOS_REQ_DFP6b0          0x40
+#define ATOM_S5_DOS_REQ_DFP2b0          0x80
+#define ATOM_S5_DOS_REQ_CVb1            0x01
+#define ATOM_S5_DOS_REQ_DFP3b1					0x02
+#define ATOM_S5_DOS_REQ_DFP4b1					0x04
+#define ATOM_S5_DOS_REQ_DFP5b1					0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0        0x0FFF
+
+#define ATOM_S5_DOS_REQ_CRT1            0x0001
+#define ATOM_S5_DOS_REQ_LCD1            0x0002
+#define ATOM_S5_DOS_REQ_TV1             0x0004
+#define ATOM_S5_DOS_REQ_DFP1            0x0008
+#define ATOM_S5_DOS_REQ_CRT2            0x0010
+#define ATOM_S5_DOS_REQ_LCD2            0x0020
+#define ATOM_S5_DOS_REQ_DFP6            0x0040
+#define ATOM_S5_DOS_REQ_DFP2            0x0080
+#define ATOM_S5_DOS_REQ_CV              0x0100
+#define ATOM_S5_DOS_REQ_DFP3            0x0200
+#define ATOM_S5_DOS_REQ_DFP4            0x0400
+#define ATOM_S5_DOS_REQ_DFP5            0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1      (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                        (ATOM_S5_DOS_FORCE_CVb3<<8))
+
+// BIOS_6_SCRATCH Definition
+#define ATOM_S6_DEVICE_CHANGE           0x00000001L
+#define ATOM_S6_SCALER_CHANGE           0x00000002L
+#define ATOM_S6_LID_CHANGE              0x00000004L
+#define ATOM_S6_DOCKING_CHANGE          0x00000008L
+#define ATOM_S6_ACC_MODE                0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE        0x00000020L
+#define ATOM_S6_LID_STATE               0x00000040L
+#define ATOM_S6_DOCK_STATE              0x00000080L
+#define ATOM_S6_CRITICAL_STATE          0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL         0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO  0x00002000L //Aspect ratio expansion Request bit for LCD
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion
+
+#define ATOM_S6_ACC_REQ_CRT1            0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1            0x00020000L
+#define ATOM_S6_ACC_REQ_TV1             0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1            0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2            0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2            0x00200000L
+#define ATOM_S6_ACC_REQ_DFP6            0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2            0x00800000L
+#define ATOM_S6_ACC_REQ_CV              0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3						0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4						0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5						0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK                0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE    0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH    0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S6_DEVICE_CHANGEb0         0x01
+#define ATOM_S6_SCALER_CHANGEb0         0x02
+#define ATOM_S6_LID_CHANGEb0            0x04
+#define ATOM_S6_DOCKING_CHANGEb0        0x08
+#define ATOM_S6_ACC_MODEb0              0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0      0x20
+#define ATOM_S6_LID_STATEb0             0x40
+#define ATOM_S6_DOCK_STATEb0            0x80
+#define ATOM_S6_CRITICAL_STATEb1        0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02  
+#define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1        0x10    
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 
+
+#define ATOM_S6_ACC_REQ_CRT1b2          0x01
+#define ATOM_S6_ACC_REQ_LCD1b2          0x02
+#define ATOM_S6_ACC_REQ_TV1b2           0x04
+#define ATOM_S6_ACC_REQ_DFP1b2          0x08
+#define ATOM_S6_ACC_REQ_CRT2b2          0x10
+#define ATOM_S6_ACC_REQ_LCD2b2          0x20
+#define ATOM_S6_ACC_REQ_DFP6b2          0x40
+#define ATOM_S6_ACC_REQ_DFP2b2          0x80
+#define ATOM_S6_ACC_REQ_CVb3            0x01
+#define ATOM_S6_ACC_REQ_DFP3b3          0x02
+#define ATOM_S6_ACC_REQ_DFP4b3          0x04
+#define ATOM_S6_ACC_REQ_DFP5b3          0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3    0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3    0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT             0
+#define ATOM_S6_SCALER_CHANGE_SHIFT             1
+#define ATOM_S6_LID_CHANGE_SHIFT                2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT            3
+#define ATOM_S6_ACC_MODE_SHIFT                  4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT          5
+#define ATOM_S6_LID_STATE_SHIFT                 6
+#define ATOM_S6_DOCK_STATE_SHIFT                7
+#define ATOM_S6_CRITICAL_STATE_SHIFT            8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT         9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT      10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT     11
+#define ATOM_S6_REQ_SCALER_SHIFT                12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT         13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT      14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT          15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT  28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT  29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT     30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT     31
+
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
+#define ATOM_S7_DOS_MODE_TYPEb0             0x03
+#define ATOM_S7_DOS_MODE_VGAb0              0x00
+#define ATOM_S7_DOS_MODE_VESAb0             0x01
+#define ATOM_S7_DOS_MODE_EXTb0              0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1        0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK     0x00000200
+#define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
+
+// BIOS_8_SCRATCH Definition
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK       0x00000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK     0x0FFFF0000   
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT      0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT       16
+
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK  0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK  
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK    0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   16
+#endif
+
+ 
+#define ATOM_FLAG_SET                         0x20
+#define ATOM_FLAG_CLEAR                       0
+#define CLEAR_ATOM_S6_ACC_MODE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE                 ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE			          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE  ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE            ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)  
+#define CLEAR_ATOM_S6_REQ_SCALER              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO         ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO       ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG           ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1                    ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )|  ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN           ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN         ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
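+// Illustrative sketch (assumption from the encodings above): each SET_/CLEAR_
+// word packs (scratch-pad index << 8) | bit position | set/clear flag, since
+// every *_SHIFT value fits in the low five bits and ATOM_FLAG_SET is 0x20:
+//
+//   USHORT cmd = SET_ATOM_S6_DOCK_STATE;              /* 0x0627 */
+//   UCHAR  pad = cmd >> 8;                            /* ATOM_ACC_CHANGE_INFO_DEF (6) */
+//   UCHAR  bit = cmd & 0x1F;                          /* ATOM_S6_DOCK_STATE_SHIFT (7) */
+//   UCHAR  set = (cmd & ATOM_FLAG_SET) != 0;          /* 1 = set, 0 = clear */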
+
+/****************************************************************************/	
+//Portion II: Definitions only used in Driver
+/****************************************************************************/
+
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define	GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
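+// Illustrative usage sketch: GetIndexIntoMasterTable() is an offsetof-style
+// macro that converts a member name in a master table list into a USHORT
+// index, and the revision macros extract the 6-bit revision fields from a
+// table header ('FirmwareInfo' and 'hdr' are assumptions for illustration):
+//
+//   int idx = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+//   ATOM_COMMON_TABLE_HEADER *hdr;                    /* hypothetical header pointer */
+//   UCHAR major = GET_DATA_TABLE_MAJOR_REVISION(hdr);
+//   UCHAR minor = GET_DATA_TABLE_MINOR_REVISION(hdr);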
+
+/****************************************************************************/	
+//Portion III: Definitions only used in VBIOS
+/****************************************************************************/
+#define ATOM_DAC_SRC					0x80
+#define ATOM_SRC_DAC1					0
+#define ATOM_SRC_DAC2					0x80
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG   ulTargetMemoryClock;  //In 10kHz unit
+  UCHAR   ucAction;             //not defined yet
+  UCHAR   ucFbDiv_Hi;           //Fbdiv Hi byte
+  UCHAR   ucFbDiv;              //FB value
+  UCHAR   ucPostDiv;            //Post div
+}MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
+
+
+#define	GPIO_PIN_WRITE													0x01			
+#define	GPIO_PIN_READ														0x00
+
+typedef struct  _GPIO_PIN_CONTROL_PARAMETERS
+{
+  UCHAR ucGPIO_ID;           //return value, read from GPIO pins
+  UCHAR ucGPIOBitShift;      //defines which bit in ucGPIOBitVal needs to be updated
+  UCHAR ucGPIOBitVal;        //Set/Reset the bit selected by ucGPIOBitShift
+  UCHAR ucAction;            //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+  UCHAR ucScaler;            // ATOM_SCALER1, ATOM_SCALER2
+  UCHAR ucEnable;            // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+  UCHAR ucTVStandard;        // 
+  UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS; 
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS 
+
+//ucEnable:
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE               2
+#define SCALER_ENABLE_MULTITAP_MODE                 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+  ULONG  usHWIconHorzVertPosn;        // Hardware Icon horizontal/vertical position
+  UCHAR  ucHWIconVertOffset;          // Hardware Icon Vertical offset
+  UCHAR  ucHWIconHorzOffset;          // Hardware Icon Horizontal offset
+  UCHAR  ucSelection;                 // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS  sEnableIcon;
+  ENABLE_CRTC_PARAMETERS                  sReserved;  
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2	
+  UCHAR  ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0. 
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  USHORT usGraphPitch;
+  UCHAR  ucColorDepth;
+  UCHAR  ucPixelFormat;
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucModeType;
+  UCHAR  ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH             0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START        0x10
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;          
+  ENABLE_YUV_PS_ALLOCATION        sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+  USHORT  usMemoryStart;                //in 8Kb boundary, offset from memory base address
+  USHORT  usMemorySize;                 //8Kb blocks aligned
+}MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+  USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC                 
+  USHORT  usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+  union{
+    USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC                 
+    USHORT  usSurface; 
+  };
+  USHORT usY_Size;
+  USHORT usDispXStart;               
+  USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2; 
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3 
+{
+  UCHAR  ucLutId;
+  UCHAR  ucAction;
+  USHORT usLutStartIndex;
+  USHORT usLutLength;
+  USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL            1
+#define PALETTE_DATA_READ                 2
+#define PALETTE_DATA_WRITE                3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+  UCHAR  ucInterruptId;
+  UCHAR  ucServiceId;
+  UCHAR  ucStatus;
+  UCHAR  ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID                 1
+#define HDP2_INTERRUPT_ID                 2
+#define HDP3_INTERRUPT_ID                 3
+#define HDP4_INTERRUPT_ID                 4
+#define HDP5_INTERRUPT_ID                 5
+#define HDP6_INTERRUPT_ID                 6
+#define SW_INTERRUPT_ID                   11   
+
+// ucServiceId
+#define INTERRUPT_SERVICE_GEN_SW_INT      1
+#define INTERRUPT_SERVICE_GET_STATUS      2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER     1
+#define INTERRUPT_STATUS__HPD_HIGH        2
+
+typedef struct _INDIRECT_IO_ACCESS
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ              0x00
+#define INDIRECT_WRITE             0x80
+
+#define INDIRECT_IO_MM             0
+#define INDIRECT_IO_PLL            1
+#define INDIRECT_IO_MC             2
+#define INDIRECT_IO_PCIE           3
+#define INDIRECT_IO_PCIEP          4
+#define INDIRECT_IO_NBMISC         5
+#define INDIRECT_IO_SMU            5
+
+#define INDIRECT_IO_PLL_READ       (INDIRECT_IO_PLL   | INDIRECT_READ)
+#define INDIRECT_IO_PLL_WRITE      (INDIRECT_IO_PLL   | INDIRECT_WRITE)
+#define INDIRECT_IO_MC_READ        (INDIRECT_IO_MC    | INDIRECT_READ)
+#define INDIRECT_IO_MC_WRITE       (INDIRECT_IO_MC    | INDIRECT_WRITE)
+#define INDIRECT_IO_PCIE_READ      (INDIRECT_IO_PCIE  | INDIRECT_READ)
+#define INDIRECT_IO_PCIE_WRITE     (INDIRECT_IO_PCIE  | INDIRECT_WRITE)
+#define INDIRECT_IO_PCIEP_READ     (INDIRECT_IO_PCIEP | INDIRECT_READ)
+#define INDIRECT_IO_PCIEP_WRITE    (INDIRECT_IO_PCIEP | INDIRECT_WRITE)
+#define INDIRECT_IO_NBMISC_READ    (INDIRECT_IO_NBMISC | INDIRECT_READ)
+#define INDIRECT_IO_NBMISC_WRITE   (INDIRECT_IO_NBMISC | INDIRECT_WRITE)
+#define INDIRECT_IO_SMU_READ       (INDIRECT_IO_SMU | INDIRECT_READ)
+#define INDIRECT_IO_SMU_WRITE      (INDIRECT_IO_SMU | INDIRECT_WRITE)
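+// Illustrative sketch: the composed opcodes above keep the target register
+// block in the low bits and the direction in bit 7, so they decompose as:
+//
+//   UCHAR op = INDIRECT_IO_MC_WRITE;                  /* INDIRECT_IO_MC | INDIRECT_WRITE */
+//   UCHAR is_write = (op & INDIRECT_WRITE) != 0;      /* direction */
+//   UCHAR block    = op & 0x7F;                       /* INDIRECT_IO_MC */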
+
+typedef struct _ATOM_OEM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+   UCHAR	ucVMode_Num;			  //Video mode number
+   UCHAR	ucTV_Mode_Num;			//Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+   USHORT	usTV_Mode_LUT_Offset;	// Pointer to standard to internal number conversion table
+   USHORT	usTV_FIFO_Offset;		  // Pointer to FIFO entry table
+   USHORT	usNTSC_Tbl_Offset;		// Pointer to SDTV_Mode_NTSC table
+   USHORT	usPAL_Tbl_Offset;		  // Pointer to SDTV_Mode_PAL table 
+   USHORT usCV_Tbl_Offset;        // Pointer to SDTV_Mode_CV table 
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+   USHORT	ucFilter0_Offset;		//Pointer to filter format 0 coefficients
+   USHORT usFilter1_Offset;       //Pointer to filter format 1 coefficients
+   UCHAR	ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT          aModeTimings[16];      // 16 is not the real array size; it is just an initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{ 
+  USHORT    usSTD_HDisp;
+  USHORT    usSTD_VDisp;
+  USHORT    usSTD_RefreshRate;
+  USHORT    usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+  USHORT  usVESA_ModeNumber;
+  USHORT  usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{ 
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+	UCHAR												ucMemoryType;
+	UCHAR												ucMemoryVendor;
+	UCHAR												ucAdjMCId;
+	UCHAR												ucDynClkId;
+	ULONG												ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
+#if ATOM_BIG_ENDIAN
+	ULONG												ucMemBlkId:8;
+	ULONG												ulMemClockRange:24;
+#else
+	ULONG												ulMemClockRange:24;
+	ULONG												ucMemBlkId:8;
+#endif
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
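+// Illustrative sketch: the union allows the packed ID to be loaded as one
+// dword while the bit-fields are read symbolically (raw is a hypothetical
+// little-endian dword from the table):
+//
+//   ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS id;
+//   id.ulAccess = le32_to_cpu(raw);
+//   ULONG clk_range = id.slAccess.ulMemClockRange;    /* in 10 kHz units */
+//   UCHAR block_id  = id.slAccess.ucMemBlkId;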
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS			ulMemoryID;
+	ULONG															        aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+	 USHORT											usRegIndex;                                     // MC register index
+	 UCHAR											ucPreRegDataLength;                             // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+	USHORT													usRegIndexTblSize;													//size of asRegIndexBuf
+	USHORT													usRegDataBlkSize;														//size of ATOM_MEMORY_SETTING_DATA_BLOCK
+	ATOM_INIT_REG_INDEX_FORMAT			asRegIndexBuf[1];
+	ATOM_MEMORY_SETTING_DATA_BLOCK	asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK  0x0ffff
+#define END_OF_REG_DATA_BLOCK   0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80               //Not used in BIOS
+#define	CLOCK_RANGE_HIGHEST			0x00ffffff
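+// Illustrative sketch (assumption about consumption): the register index
+// table is walked until the END_OF_REG_INDEX_BLOCK sentinel, with
+// usRegIndexTblSize bounding the walk:
+//
+//   const ATOM_INIT_REG_BLOCK *blk;                   /* hypothetical pointer */
+//   int i, n = le16_to_cpu(blk->usRegIndexTblSize) /
+//              sizeof(ATOM_INIT_REG_INDEX_FORMAT);
+//   for (i = 0; i < n; i++) {
+//       USHORT reg = le16_to_cpu(blk->asRegIndexBuf[i].usRegIndex);
+//       if (reg == END_OF_REG_INDEX_BLOCK)
+//           break;
+//       /* ucPreRegDataLength gives this register's offset into the data blocks */
+//   }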
+
+#define VALUE_DWORD             SIZEOF ULONG
+#define VALUE_SAME_AS_ABOVE     0
+#define VALUE_MASK_DWORD        0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN	    (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END		    (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE	    (INDEX_ACCESS_RANGE_END + 1)
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  USHORT											usAdjustARB_SEQDataOffset;
+  USHORT											usMCInitMemTypeTblOffset;
+  USHORT											usMCInitCommonTblOffset;
+  USHORT											usMCInitPowerDownTblOffset;
+	ULONG												ulARB_SEQDataBuf[32];
+	ATOM_INIT_REG_BLOCK					asMCInitMemType;
+	ATOM_INIT_REG_BLOCK					asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
+
+#define _4Mx16              0x2
+#define _4Mx32              0x3
+#define _8Mx16              0x12
+#define _8Mx32              0x13
+#define _16Mx16             0x22
+#define _16Mx32             0x23
+#define _32Mx16             0x32
+#define _32Mx32             0x33
+#define _64Mx8              0x41
+#define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _128Mx32            0x53
+#define _256Mx8             0x61
+#define _256Mx16            0x62
+#define _512Mx8             0x71
+
+#define SAMSUNG             0x1
+#define INFINEON            0x2
+#define ELPIDA              0x3
+#define ETRON               0x4
+#define NANYA               0x5
+#define HYNIX               0x6
+#define MOSEL               0x7
+#define WINBOND             0x8
+#define ESMT                0x9
+#define MICRON              0xF
+
+#define QIMONDA             INFINEON
+#define PROMOS              MOSEL
+#define KRETON              INFINEON
+#define ELIXIR              NANYA
+#define MEZZA               ELPIDA
+
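+// Illustrative sketch: per the ucMemoryDeviceCfg field comments in the VRAM
+// module structures below, the _xxMxyy codes put the device density in the
+// high nibble and the device width in the low nibble:
+//
+//   UCHAR cfg     = _64Mx16;                          /* 0x42 */
+//   UCHAR density = cfg >> 4;                         /* 0x4 -> 64M devices */
+//   UCHAR width   = cfg & 0x0F;                       /* 0x2 -> x16 */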
+
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
+
+#define UCODE_ROM_START_ADDRESS		0x1b800
+#define	UCODE_SIGNATURE			0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+  ULONG  ulSignature;
+  UCHAR  ucRevision;
+  UCHAR  ucChecksum;
+  UCHAR  ucReserved1;
+  UCHAR  ucReserved2;
+  USHORT usParametersLength;
+  USHORT usUCodeLength;
+  USHORT usReserved1;
+  USHORT usReserved2;
+} MCuCodeHeader;
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE	16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK	0xF
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+  ULONG                      ulReserved;
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor 
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of what channel combination; [7:4]=Channel bit width, in powers of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+  ULONG                      ulReserved;
+  ULONG                      ulFlags;     			// To enable/disable functionalities based on memory type
+  ULONG                      ulEngineClock;     // Override of default engine clock for particular memory type
+  ULONG                      ulMemoryClock;     // Override of default memory clock for particular memory type
+  USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table is executed
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of what channel combination; [7:4]=Channel bit width, in powers of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucRefreshRateFactor;
+  UCHAR                      ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+  union{
+	  USHORT										 usMRS;							// mode register						
+    USHORT                     usDDR3_MR0;
+  };
+  union{
+	  USHORT										 usEMRS;						// extended mode register
+    USHORT                     usDDR3_MR1;
+  };
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+  union 
+  {
+    struct {
+	    UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+	    UCHAR											 ucReserved;						
+    };
+    USHORT                   usDDR3_MR2;
+  };
+}ATOM_MEMORY_TIMING_FORMAT;
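+// Illustrative sketch (assumption): the asMemTiming[] arrays built from this
+// struct are sorted by ulClkRange from low to high, so a consumer picks the
+// first entry whose range covers the target memory clock (10 kHz units):
+//
+//   const ATOM_MEMORY_TIMING_FORMAT *tim;             /* hypothetical array of 5 */
+//   ULONG target = 50000;                             /* 500 MHz */
+//   int i;
+//   for (i = 0; i < 5; i++)
+//       if (target <= le32_to_cpu(tim[i].ulClkRange))
+//           break;                                    /* tim[i] holds the timings to use */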
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR4lo;					// 
+	UCHAR											 ucMR4hi;					// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+	UCHAR											 ucReserved;	
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef	struct _ATOM_MEMORY_FORMAT
+{
+  ULONG                      ulDllDisClock;        // memory DLL will be disabled when the target memory clock is below this clock
+  union{
+    USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_Reserved;   // Not used for DDR3 memory
+  };
+  union{
+    USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_MR3;        // Used for DDR3 memory
+  };
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;     // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table is executed
+  UCHAR                      ucRow;                // Number of rows, in power of 2;
+  UCHAR                      ucColumn;             // Number of columns, in power of 2;
+  UCHAR                      ucBank;               // Number of banks;
+  UCHAR                      ucRank;               // Number of ranks, in power of 2
+  UCHAR                      ucBurstSize;          // burst size, 0 = burst size 4, 1 = burst size 8
+  UCHAR                      ucDllDisBit;          // position of DLL Enable/Disable bit in EMRS (Extended Mode Register)
+  UCHAR                      ucRefreshRateFactor;  // memory refresh rate in unit of ms
+  UCHAR                      ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR                      ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR                      ucMemAttrib;          // Memory device attribute, like RDBI/WDBI etc
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5];       // Memory timing blocks sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+  ULONG                      ulChannelMapCfg;   // board dependent parameter: Channel combination
+  USHORT                     usSize;            // size of ATOM_VRAM_MODULE_V3
+  USHORT                     usDefaultMVDDQ;    // board dependent parameter: Default Memory Core Voltage
+  USHORT                     usDefaultMVDDC;    // board dependent parameter: Default Memory IO Voltage
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucChannelNum;      // board dependent parameter: Number of channels;
+  UCHAR                      ucChannelSize;     // board dependent parameter: 32bit or 64bit
+  UCHAR                      ucVREFI;           // board dependent parameter: EXT or INT +160mv to -140mv
+  UCHAR                      ucNPL_RT;          // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR                      ucFlag;            // To enable/disable functionalities based on memory type
+  ATOM_MEMORY_FORMAT         asMemory;          // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK															0x0f
+#define BATTERY_ODT_MASK												0xc0
+
+#define ATOM_VRAM_MODULE		 ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  union{
+    USHORT	usEMRS2Value;                   // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_Reserved;
+  };
+  union{
+    USHORT	usEMRS3Value;                   // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_MR3;                     // Used for DDR3 memory
+  };  
+  UCHAR   ucMemoryVenderID;                 // Predefined; if not predefined, the vendor detection table is executed
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
+  UCHAR   ucReserved2[2];
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5]; //Memory timing blocks sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK         0x4
+#define VRAM_MODULE_V4_MISC_BL8             0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10
+
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;                 // Predefined; if not predefined, the vendor detection table is executed
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
+  UCHAR   ucFIFODepth;                      // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;                  // [3:0]=Read CDR bandwidth, [7:4]=Write CDR bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V1  asMemTiming[5]; //Memory timing blocks sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4; makes it easy for VBIOS to find the next VRAM_MODULE entry
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell which memory module is current
+  UCHAR   ucMemoryType;                     // [7:4]: 0x1=DDR1, 0x2=DDR2, 0x3=DDR3, 0x4=DDR4, 0x5=DDR5; [3:0]: must be 0x0 for now
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucFlag;                           // To enable/disable functionalities based on memory type
+  UCHAR   ucMisc;                           // bit0: 0 - single rank, 1 - dual rank; bit2: 0 - burst length 4, 1 - burst length 8
+  UCHAR   ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR   ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+// Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;                 // Predefined; if not predefined, the vendor detection table gets executed
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
+  UCHAR   ucFIFODepth;                      // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7]=Write CDR bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5]; // Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG	  ulChannelMapCfg;	                // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usEnableChannels;                 // bit vector which indicate which channels are enabled
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+  UCHAR   ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR   ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]: TCL = CL + NPL_RT - 2). Always 2.
+  UCHAR   ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  USHORT  usSEQSettingOffset;
+  UCHAR   ucReserved;
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
+  UCHAR   ucFIFODepth;                      // FIFO depth can be detected during vendor detection; here it is hardcoded per memory module
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7]=Write CDR bandwidth
+  char    strMemPNString[20];               // part number, terminated with '0'
+}ATOM_VRAM_MODULE_V7;
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE           aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;    // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;  // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usRerseved;
+  UCHAR                      aVID_PinsShift[9];       // 8 bit strap maximum+terminator
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE           aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule
+  ATOM_INIT_REG_BLOCK        asMemPatch;              // for allocation
+                                                      // ATOM_INIT_REG_BLOCK  aMemAdjust;
+}ATOM_VRAM_INFO_V3;
+
+#define	ATOM_VRAM_INFO_LAST	     ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;    // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;  // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usRerseved;
+  UCHAR                      ucMemDQ7_0ByteRemap;     // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+  ULONG                      ulMemDQ7_0BitRemap;      // each DQ line (7~0) uses 3 bits, e.g. DQ0=Bit[2:0], DQ1=[5:3], ... DQ7=[23:21]
+  UCHAR                      ucReservde[4];
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE_V4        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule
+  ATOM_INIT_REG_BLOCK        asMemPatch;              // for allocation
+                                                      // ATOM_INIT_REG_BLOCK  aMemAdjust;
+}ATOM_VRAM_INFO_V4;
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usPerBytePresetOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usReserved[3];
+  UCHAR                      ucNumOfVRAMModule;                              // indicate number of VRAM module
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved; 
+  ATOM_VRAM_MODULE_V7        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule
+}ATOM_VRAM_INFO_HEADER_V2_1;
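+
+// Illustrative sketch: usModuleSize lets a parser step to the next VRAM
+// module entry without assuming a fixed stride, since entries are variable
+// length. A minimal walk over ATOM_VRAM_INFO_HEADER_V2_1 might look like
+// this; the helper name is an assumption.
+static inline ATOM_VRAM_MODULE_V7 *
+vram_info_v2_1_module(ATOM_VRAM_INFO_HEADER_V2_1 *info, UCHAR index)
+{
+	UCHAR *p = (UCHAR *)&info->aVramInfo[0];
+	UCHAR i;
+
+	if (index >= info->ucNumOfVRAMModule)
+		return (ATOM_VRAM_MODULE_V7 *)0;
+	for (i = 0; i < index; i++)
+		p += ((ATOM_VRAM_MODULE_V7 *)p)->usModuleSize;
+	return (ATOM_VRAM_MODULE_V7 *)p;
+}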
+
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR           	         aVID_PinsShift[9];   //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+	ATOM_COMMON_TABLE_HEADER   sHeader;
+	UCHAR											 ucTrainingLoop;
+	UCHAR											 ucReserved[3];
+	ATOM_INIT_REG_BLOCK				 asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+  UCHAR    ucControl;
+  UCHAR    ucData; 
+  UCHAR    ucSatus; 
+  UCHAR    ucTemp; 
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{                               
+  USHORT   GPIO_Info;
+  UCHAR    ucAct; 
+  UCHAR    ucData; 
+ } SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET       0
+#define SW_I2C_IO_GET         1
+#define SW_I2C_IO_DRIVE       2
+#define SW_I2C_IO_SET         3
+#define SW_I2C_IO_START       4
+
+#define SW_I2C_IO_CLOCK       0
+#define SW_I2C_IO_DATA        0x80
+
+#define SW_I2C_IO_ZERO        0
+#define SW_I2C_IO_ONE         0x100
+
+#define SW_I2C_CNTL_READ      0
+#define SW_I2C_CNTL_WRITE     1
+#define SW_I2C_CNTL_START     2
+#define SW_I2C_CNTL_STOP      3
+#define SW_I2C_CNTL_OPEN      4
+#define SW_I2C_CNTL_CLOSE     5
+#define SW_I2C_CNTL_WRITE1BIT 6
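+
+// Illustrative sketch: a SW I2C control request pairs one of the
+// SW_I2C_CNTL_* actions above with a data byte. That the BIOS writes the
+// result back into ucSatus is an assumption based on the field name only.
+static inline void sw_i2c_cntl_request(SW_I2C_CNTL_DATA_PARAMETERS *cntl,
+				       UCHAR control, UCHAR data)
+{
+	cntl->ucControl = control; /* e.g. SW_I2C_CNTL_WRITE */
+	cntl->ucData = data;       /* payload for WRITE, ignored for START/STOP */
+	cntl->ucSatus = 0;         /* presumably written back by the BIOS */
+	cntl->ucTemp = 0;
+}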
+
+//==============================VESA definition Portion===============================
+#define VESA_OEM_PRODUCT_REV			            "01.00"
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT	     0xBB	//refer to VBE spec p.32, no TTY support
+#define VESA_MODE_WIN_ATTRIBUTE						     7
+#define VESA_WIN_SIZE											     64
+
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+	USHORT	Offset16;			
+	USHORT	Segment16;				
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION
+{
+	PTR_32_BIT_STRUCTURE	SegmentOffset;
+	ULONG					        Ptr32_Bit;
+} PTR_32_BIT_UNION;
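+
+// Illustrative sketch: the OEM/mode pointers below are real-mode
+// segment:offset pairs, which map to a linear address as segment * 16 +
+// offset. The helper name is an assumption.
+static inline ULONG ptr_32_bit_to_linear(PTR_32_BIT_UNION p)
+{
+	return ((ULONG)p.SegmentOffset.Segment16 << 4) +
+	       p.SegmentOffset.Offset16;
+}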
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+	UCHAR				      VbeSignature[4];
+	USHORT				    VbeVersion;
+	PTR_32_BIT_UNION	OemStringPtr;
+	UCHAR				      Capabilities[4];
+	PTR_32_BIT_UNION	VideoModePtr;
+	USHORT				    TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+	VBE_1_2_INFO_BLOCK_UPDATABLE	CommonBlock;
+	USHORT							    OemSoftRev;
+	PTR_32_BIT_UNION				OemVendorNamePtr;
+	PTR_32_BIT_UNION				OemProductNamePtr;
+	PTR_32_BIT_UNION				OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION
+{
+	VBE_2_0_INFO_BLOCK_UPDATABLE	VBE_2_0_InfoBlock;
+	VBE_1_2_INFO_BLOCK_UPDATABLE	VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK
+{
+	VBE_VERSION_UNION			UpdatableVBE_Info;
+	UCHAR						      Reserved[222];
+	UCHAR						      OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO
+{
+  USHORT	HSize;
+	USHORT	VSize;
+	USHORT	FPType;
+	UCHAR		RedBPP;
+	UCHAR		GreenBPP;
+	UCHAR		BlueBPP;
+	UCHAR		ReservedBPP;
+	ULONG		RsvdOffScrnMemSize;
+	ULONG		RsvdOffScrnMEmPtr;
+	UCHAR		Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+  USHORT    ModeAttributes;  //			dw	?	; mode attributes
+	UCHAR     WinAAttributes;  //			db	?	; window A attributes
+	UCHAR     WinBAttributes;  //			db	?	; window B attributes
+	USHORT    WinGranularity;  //			dw	?	; window granularity
+	USHORT    WinSize;         //			dw	?	; window size
+	USHORT    WinASegment;     //			dw	?	; window A start segment
+	USHORT    WinBSegment;     //			dw	?	; window B start segment
+	ULONG     WinFuncPtr;      //			dd	?	; real mode pointer to window function
+	USHORT    BytesPerScanLine;//			dw	?	; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+  USHORT    XResolution;      //			dw	?	; horizontal resolution in pixels or characters
+	USHORT    YResolution;      //			dw	?	; vertical resolution in pixels or characters
+	UCHAR     XCharSize;        //			db	?	; character cell width in pixels
+	UCHAR     YCharSize;        //			db	?	; character cell height in pixels
+	UCHAR     NumberOfPlanes;   //			db	?	; number of memory planes
+	UCHAR     BitsPerPixel;     //			db	?	; bits per pixel
+	UCHAR     NumberOfBanks;    //			db	?	; number of banks
+	UCHAR     MemoryModel;      //			db	?	; memory model type
+	UCHAR     BankSize;         //			db	?	; bank size in KB
+	UCHAR     NumberOfImagePages;//		  db	?	; number of images
+	UCHAR     ReservedForPageFunction;//db	1	; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+	UCHAR			RedMaskSize;        //		db	?	; size of direct color red mask in bits
+	UCHAR			RedFieldPosition;   //		db	?	; bit position of lsb of red mask
+	UCHAR			GreenMaskSize;      //		db	?	; size of direct color green mask in bits
+	UCHAR			GreenFieldPosition; //		db	?	; bit position of lsb of green mask
+	UCHAR			BlueMaskSize;       //		db	?	; size of direct color blue mask in bits
+	UCHAR			BlueFieldPosition;  //		db	?	; bit position of lsb of blue mask
+	UCHAR			RsvdMaskSize;       //		db	?	; size of direct color reserved mask in bits
+	UCHAR			RsvdFieldPosition;  //		db	?	; bit position of lsb of reserved mask
+	UCHAR			DirectColorModeInfo;//		db	?	; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+	ULONG			PhysBasePtr;        //		dd	?	; physical address for flat memory frame buffer
+	ULONG			Reserved_1;         //		dd	0	; reserved - always set to 0
+	USHORT		Reserved_2;         //	  dw	0	; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+	USHORT		LinBytesPerScanLine;  //	dw	?	; bytes per scan line for linear modes
+	UCHAR			BnkNumberOfImagePages;//	db	?	; number of images for banked modes
+	UCHAR			LinNumberOfImagPages; //	db	?	; number of images for linear modes
+	UCHAR			LinRedMaskSize;       //	db	?	; size of direct color red mask(linear modes)
+	UCHAR			LinRedFieldPosition;  //	db	?	; bit position of lsb of red mask(linear modes)
+	UCHAR			LinGreenMaskSize;     //	db	?	; size of direct color green mask(linear modes)
+	UCHAR			LinGreenFieldPosition;//	db	?	; bit position of lsb of green mask(linear modes)
+	UCHAR			LinBlueMaskSize;      //	db	?	; size of direct color blue mask(linear modes)
+	UCHAR			LinBlueFieldPosition; //	db	?	; bit position of lsb of blue mask(linear modes)
+	UCHAR			LinRsvdMaskSize;      //	db	?	; size of direct color reserved mask(linear modes)
+	UCHAR			LinRsvdFieldPosition; //	db	?	; bit position of lsb of reserved mask(linear modes)
+	ULONG			MaxPixelClock;        //	dd	?	; maximum pixel clock(in Hz) for graphics mode
+	UCHAR			Reserved;             //	db	190 dup (0)
+} VESA_MODE_INFO_BLOCK;
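+
+// Illustrative sketch: the direct-color fields above describe each channel
+// as a mask size plus the bit position of its LSB, so recovering a channel
+// from a packed pixel is a shift and mask. Shown for red; the other channels
+// are identical. The helper name is an assumption.
+static inline ULONG vesa_extract_red(ULONG pixel,
+				     const VESA_MODE_INFO_BLOCK *mi)
+{
+	ULONG mask = (1UL << mi->RedMaskSize) - 1;
+	return (pixel >> mi->RedFieldPosition) & mask;
+}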
+
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0	        // ATI Extended Function code
+#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B   
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD              0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET           0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH        0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A 
+#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000          // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100          // Sub function 81
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F 
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300          // Sub function 03  
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700          // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400          // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300          // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500          // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400          // Notify caller that ADC is supported
+     
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10          // Set DPMS 
+#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001          // BL: Sub function 01 
+#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002          // BL: Sub function 02 
+#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000          // BH Parameter for DPMS ON.  
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100          // BH Parameter for DPMS STANDBY  
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200          // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400          // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800          // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
+
+#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL
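+
+// Illustrative sketch: the masks above split a 16-bit BIOS register value
+// into its high byte (return code) and low byte; extraction is a mask and
+// shift. The helper name is an assumption.
+static inline UCHAR atom_bios_return_code(ULONG regs)
+{
+	return (UCHAR)((regs & ATOM_BIOS_RETURN_CODE_MASK) >> 8);
+}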
+
+// structure used for VBIOS only
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
+	USHORT usTransmitterObjId;
+	USHORT usSupportDevice;
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 //available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    //available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
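+
+// Illustrative sketch: ENCODEROBJ_ID_MASK (0xc4) spans bits 7:6 and bit 2,
+// so the selected encoder is recovered by masking alone and then compared
+// against the ENCODER_A..F values above. The helper name is an assumption.
+static inline UCHAR asic_transmitter_encoder_sel(UCHAR ucConfig)
+{
+	return ucConfig & ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK;
+}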
+
+typedef struct _ASIC_ENCODER_INFO
+{
+	UCHAR ucEncoderID;
+	UCHAR ucEncoderConfig;
+  USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
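+
+// Illustrative sketch: ptrTransmitterInfo/ptrEncoderInfo appear to be byte
+// offsets to the two variable-length arrays; assuming they are relative to
+// the start of the table, the lists can be located like this. The helper
+// names are assumptions.
+static inline ASIC_TRANSMITTER_INFO *
+disp_out_transmitters(ATOM_DISP_OUT_INFO *info)
+{
+	return (ASIC_TRANSMITTER_INFO *)((UCHAR *)info +
+					 info->ptrTransmitterInfo);
+}
+
+static inline ASIC_ENCODER_INFO *
+disp_out_encoders(ATOM_DISP_OUT_INFO *info)
+{
+	return (ASIC_ENCODER_INFO *)((UCHAR *)info + info->ptrEncoderInfo);
+}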
+
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
+
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+  UCHAR ucPpllId; 
+  UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE            0x01
+#define CLOCK_SOURCE_DP_MODE              0x02
+#define CLOCK_SOURCE_NONE_DP_MODE         0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+	USHORT usTransmitterObjId;
+	USHORT usDispClkIdOffset;    // point to clock source id list supported by Encoder Object
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 // available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    // available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+  USHORT usReserved;
+  UCHAR  ucDCERevision;   
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;                    // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucDispCaps;
+  UCHAR  ucReserved[2];
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+//ucDispCaps
+#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL        0x01
+#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED  0x02
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucHPD_ID;                                       //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION			PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+	USHORT ucLinkClock;
+	union 
+	{
+	UCHAR ucConfig;				// for DP training command
+	UCHAR ucI2cId;				// use for GET_SINK_TYPE command
+	};
+	UCHAR ucAction;
+	UCHAR ucStatus;
+	UCHAR ucLaneNum;
+	UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE							0x01
+/* obsolete */
+#define ATOM_DP_ACTION_TRAINING_START							0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE					0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL				0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP					0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP					0x06
+#define ATOM_DP_ACTION_BLANKING                   0x07
+
+// ucConfig
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK						0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER								0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER								0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER						0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK							0x04
+#define ATOM_DP_CONFIG_LINK_A											0x00
+#define ATOM_DP_CONFIG_LINK_B											0x04
+/* /obsolete */
+#define DP_ENCODER_SERVICE_PS_ALLOCATION				WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+	USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and output parameter.
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+	UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE							0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION			    0x02
+
+
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR				ATOM_DP_TRAINING_TBL_ADDR		
+#define DPCD_SET_SS_CNTL_TBL_ADDR													(ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 24 )
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define	DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 80) 
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR									(ATOM_DP_TRAINING_TBL_ADDR + 84)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+	UCHAR   ucI2CSpeed;
+ 	union
+	{
+   UCHAR ucRegIndex;
+   UCHAR ucStatus;
+	};
+	USHORT  lpI2CDataOut;
+  UCHAR   ucFlag;               
+  UCHAR   ucTransBytes;
+  UCHAR   ucSlaveAddr;
+  UCHAR   ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+//ucFlag
+#define HW_I2C_WRITE        1
+#define HW_I2C_READ         0
+#define I2C_2BYTE_ADDR      0x02
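+
+// Illustrative sketch: preparing the parameter block for a HW I2C read using
+// the ucFlag values above. Field usage is inferred from the field names and
+// is an assumption.
+static inline void
+hw_i2c_prepare_read(PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS *p,
+		    UCHAR slave_addr, UCHAR nbytes)
+{
+	p->ucFlag = HW_I2C_READ;
+	p->ucSlaveAddr = slave_addr;  /* target device address */
+	p->ucTransBytes = nbytes;     /* number of bytes to transfer */
+}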
+
+/****************************************************************************/	
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/	
+typedef struct  _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucCmd;                //  Input: To tell which action to take
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; 
+
+typedef struct  _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucReturnCode;        // Output: Return value base on action was taken
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define  ATOM_GET_SDI_SUPPORT              0xF0
+
+// Return code 
+#define  ATOM_UNKNOWN_CMD                   0
+#define  ATOM_FEATURE_NOT_SUPPORTED         1
+#define  ATOM_FEATURE_SUPPORTED             2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+	ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1        sInput_Output;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS         sReserved; 
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
+
+/****************************************************************************/	
+
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+   UCHAR ucHWBlkInst;                // HW block instance, 0, 1, 2, ...
+   UCHAR ucReserved[3]; 
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK       0x07
+#define HWBLKINST_HWBLK_MASK          0xF0
+#define HWBLKINST_HWBLK_SHIFT         0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE            0
+#define SELECT_DISP_PLL               1
+#define SELECT_DCIO_UNIPHY_LINK0      2
+#define SELECT_DCIO_UNIPHY_LINK1      3
+#define SELECT_DCIO_IMPCAL            4
+#define SELECT_DCIO_DIG               6
+#define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
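+
+// Illustrative sketch: ucHWBlkInst packs the block selector (masked by
+// HWBLKINST_HWBLK_MASK after shifting) together with the instance number in
+// the low bits. The helper name is an assumption.
+static inline UCHAR hwblkinst_pack(UCHAR block, UCHAR instance)
+{
+	return (UCHAR)(((block << HWBLKINST_HWBLK_SHIFT) &
+			HWBLKINST_HWBLK_MASK) |
+		       (instance & HWBLKINST_INSTANCE_MASK));
+}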
+
+// DIGTransmitterInfoTable structure used to program UNIPHY settings 
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{  
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock 
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info 
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info 
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{  
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock 
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info 
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info 
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+  USHORT usDPSSRegListOffset;            // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info 
+  USHORT usDPSSSettingOffset;            // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_2;
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG  ulCondition;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_VAL_V2{
+  ULONG  ulCondition;
+  UCHAR  ucCondition2;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL_V2;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_CONDITION_REG_INFO_V2{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL_V2 asRegVal[1];
+}PHY_CONDITION_REG_INFO_V2;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO_V2{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO_V2  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO_V2;
+
+typedef struct _GFX_HAVESTING_PARAMETERS {
+  UCHAR ucGfxBlkId;                        //GFX blk id to be harvested, like CU, RB or PRIM
+  UCHAR ucReserved;                        //reserved 
+  UCHAR ucActiveUnitNumPerSH;              //requested active CU/RB/PRIM number per shader array
+  UCHAR ucMaxUnitNumPerSH;                 //max CU/RB/PRIM number per shader array   
+} GFX_HAVESTING_PARAMETERS;
+
+//ucGfxBlkId
+#define GFX_HARVESTING_CU_ID               0
+#define GFX_HARVESTING_RB_ID               1
+#define GFX_HARVESTING_PRIM_ID             2
+
+/****************************************************************************/	
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
+#define MC_MISC0__MEMORY_TYPE__HBM    0x60000000
+#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
+
+#define ATOM_MEM_TYPE_DDR_STRING      "DDR"
+#define ATOM_MEM_TYPE_DDR2_STRING     "DDR2"
+#define ATOM_MEM_TYPE_GDDR3_STRING    "GDDR3"
+#define ATOM_MEM_TYPE_GDDR4_STRING    "GDDR4"
+#define ATOM_MEM_TYPE_GDDR5_STRING    "GDDR5"
+#define ATOM_MEM_TYPE_HBM_STRING      "HBM"
+#define ATOM_MEM_TYPE_DDR3_STRING     "DDR3"
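+
+// Illustrative sketch: mapping the MC_MISC0 memory-type field onto the
+// strings above, e.g. for reporting the VRAM type. Reporting GDDR1 with the
+// generic DDR string is an assumption, as is the helper name.
+static inline const char *mc_misc0_mem_type_name(ULONG mc_misc0)
+{
+	switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
+	case MC_MISC0__MEMORY_TYPE__GDDR1: return ATOM_MEM_TYPE_DDR_STRING;
+	case MC_MISC0__MEMORY_TYPE__DDR2:  return ATOM_MEM_TYPE_DDR2_STRING;
+	case MC_MISC0__MEMORY_TYPE__GDDR3: return ATOM_MEM_TYPE_GDDR3_STRING;
+	case MC_MISC0__MEMORY_TYPE__GDDR4: return ATOM_MEM_TYPE_GDDR4_STRING;
+	case MC_MISC0__MEMORY_TYPE__GDDR5: return ATOM_MEM_TYPE_GDDR5_STRING;
+	case MC_MISC0__MEMORY_TYPE__HBM:   return ATOM_MEM_TYPE_HBM_STRING;
+	case MC_MISC0__MEMORY_TYPE__DDR3:  return ATOM_MEM_TYPE_DDR3_STRING;
+	default:                           return "unknown";
+	}
+}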
+
+/****************************************************************************/	
+//Portion VI: Definitions being obsoleted
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT                   usMaxFrequency;      // in 10kHz unit
+  USHORT                   usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct  _COMPASSIONATE_DATA           
+{
+  ATOM_COMMON_TABLE_HEADER sHeader; 
+
+  //==============================  DAC1 portion
+  UCHAR   ucDAC1_BG_Adjustment;
+  UCHAR   ucDAC1_DAC_Adjustment;
+  USHORT  usDAC1_FORCE_Data;
+  //==============================  DAC2 portion
+  UCHAR   ucDAC2_CRT2_BG_Adjustment;
+  UCHAR   ucDAC2_CRT2_DAC_Adjustment;
+  USHORT  usDAC2_CRT2_FORCE_Data;
+  USHORT  usDAC2_CRT2_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CRT2_MUX_RegisterInfo;     //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_NTSC_BG_Adjustment;
+  UCHAR   ucDAC2_NTSC_DAC_Adjustment;
+  USHORT  usDAC2_TV1_FORCE_Data;
+  USHORT  usDAC2_TV1_MUX_RegisterIndex;
+  UCHAR   ucDAC2_TV1_MUX_RegisterInfo;      //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_CV_BG_Adjustment;
+  UCHAR   ucDAC2_CV_DAC_Adjustment;
+  USHORT  usDAC2_CV_FORCE_Data;
+  USHORT  usDAC2_CV_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CV_MUX_RegisterInfo;       //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_PAL_BG_Adjustment;
+  UCHAR   ucDAC2_PAL_DAC_Adjustment;
+  USHORT  usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+//  ucConnectInfo:
+//    [7:4] - connector type
+//      = 1   - VGA connector   
+//      = 2   - DVI-I
+//      = 3   - DVI-D
+//      = 4   - DVI-A
+//      = 5   - SVIDEO
+//      = 6   - COMPOSITE
+//      = 7   - LVDS
+//      = 8   - DIGITAL LINK
+//      = 9   - SCART
+//      = 0xA - HDMI_type A
+//      = 0xB - HDMI_type B
+//      = 0xE - Special case1 (DVI+DIN)
+//      Others=TBD
+//    [3:0] - DAC Associated
+//      = 0   - no DAC
+//      = 1   - DACA
+//      = 2   - DACB
+//      = 3   - External DAC
+//      Others=TBD
+//    
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfConnectorType:4;
+  UCHAR   bfAssociatedDAC:4;
+#else
+  UCHAR   bfAssociatedDAC:4;
+  UCHAR   bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+  ATOM_CONNECTOR_INFO sbfAccess;
+  UCHAR               ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
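+
+// Illustrative sketch: the union above lets the same byte be read either as
+// raw storage (ucAccess) or through the endian-aware bitfields, so decoding
+// the connector type is a plain field access. The helper name is an
+// assumption.
+static inline UCHAR atom_connector_type(ATOM_CONNECTOR_INFO_ACCESS info)
+{
+	return info.sbfAccess.bfConnectorType; /* e.g. 1 = VGA, 2 = DVI-I */
+}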
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C   asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED       0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+  UCHAR   ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+   USHORT usFrequency;
+   UCHAR  ucPLL_ChargePump;				                // PLL charge-pump gain control
+   UCHAR  ucPLL_DutyCycle;				                // PLL duty cycle control
+   UCHAR  ucPLL_VCO_Gain;				                  // PLL VCO gain control
+   UCHAR  ucPLL_VoltageSwing;			                // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;  
+
+
+#define ATOM_MAX_MISC_INFO       4
+
+typedef struct _ATOM_TMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT                   usMaxFrequency;             // in 10kHz unit
+  ATOM_MISC_CONTROL_INFO   asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+  UCHAR ucTVStandard;     //Same as TV standards defined above, 
+  UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+  UCHAR ucAttribute;      //Same as other digital encoder attributes defined above
+  UCHAR ucPadding[1];		
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+  ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock; 
+  USHORT usEncoderID; 
+  UCHAR  ucDeviceType;												//Use ATOM_DEVICE_xxx1_Index to indicate device type only.	
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  ATOM_ENCODER_ATTRIBUTE usDevAttr;     		
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{                               
+  DVO_ENCODER_CONTROL_PARAMETERS    sDVOEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+
+#define ATOM_XTMDS_ASIC_SI164_ID        1
+#define ATOM_XTMDS_ASIC_SI178_ID        2
+#define ATOM_XTMDS_ASIC_TFP513_ID       3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
+#define ATOM_XTMDS_MVPU_FPGA            0x00000004
+
+                           
+typedef struct _ATOM_XTMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  USHORT                     usSingleLinkMaxFrequency; 
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;           // ID of the I2C bus used to control the external chip
+  UCHAR                      ucXtransimitterID;          
+  UCHAR                      ucSupportedLink;    // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+  UCHAR                      ucSequnceAlterID;   // Even with the same external TMDS asic, it's possible that the programming sequence alters
+                                                 // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+  UCHAR                      ucMasterAddress;    // Address to control Master xTMDS Chip
+  UCHAR                      ucSlaveAddress;     // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{  
+  UCHAR ucEnable;                     // ATOM_ENABLE=On or ATOM_DISABLE=Off
+  UCHAR ucDevice;                     // ATOM_DEVICE_DFP1_INDEX....
+  UCHAR ucPadding[2];             
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                     0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                  0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                  0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT            0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH        0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN             0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN          0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN          0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                 0x00000080L  //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program
+ 
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN      0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN         0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN              0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE     0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE            0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE             0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                 0x00010000L 
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE               0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE              0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L  //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20 
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2      0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4      0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN            0x02000000L  //When set, dynamic HDP block control is enabled
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN        0x04000000L  //When set, dynamic MC host block control is enabled
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN              0x08000000L  //When set, this mode is for accelerated 3D
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L  //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) 
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                0x80000000L
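+
+// Illustrative sketch: the multi-bit fields in ulMiscInfo come out with the
+// usual mask-and-shift, e.g. the PowerPlay settings group. The helper name
+// is an assumption.
+static inline ULONG pm_miscinfo_settings_group(ULONG ulMiscInfo)
+{
+	return (ulMiscInfo & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
+	       ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;
+}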
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE            0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT          0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN           0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L  //If this bit is set in multi-pp mode, the driver will pick the one with the lowest power consumption.
+                                                                      //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L 
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulReserved1;                // must set to 0
+  ULONG     ulReserved2;                // must set to 0
+  USHORT    usEngineClock;
+  USHORT    usMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO_V2
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct  _ATOM_POWERMODE_INFO_V3
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to Core (VDDC) voltage table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+  UCHAR     ucVDDCI_VoltageDropIndex;   // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN            0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE         0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63      0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032   0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030   0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649   0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64      0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375    0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512   0x07	// Andigilog
+
+
+typedef struct  _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct  _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+  
+typedef struct  _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+
+// Following definitions are for compatibility issue in different SW components. 
+#define ATOM_MASTER_DATA_TABLE_REVISION   0x01
+#define Object_Info												Object_Header			
+#define	AdjustARB_SEQ											MC_InitParameter
+#define	VRAM_GPIO_DetectionInfo						VoltageObjectInfo
+#define	ASIC_VDDCI_Info                   ASIC_ProfilingInfo														
+#define ASIC_MVDDQ_Info										MemoryTrainingInfo
+#define SS_Info                           PPLL_SS_Info                      
+#define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
+#define DispDevicePriorityInfo						SaveRestoreInfo
+#define DispOutInfo												TV_VideoMode
+
+
+#define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
+
+//New device naming, remove them when both DAL/VBIOS is ready
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS    DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT          ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT          ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
+ 
+#define ATOM_DEVICE_DFP2I_INDEX            0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I                      ATOM_S0_DFP1
+#define ATOM_S0_DFP1X                      ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I                      0x00200000L
+#define ATOM_S0_DFP2Ib2                    0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE           ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE           ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE           0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3         0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1             0x02
+
+#define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE 
+#define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE               0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE          ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE          ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE          0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3        0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1            0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I              0x0200
+#define ATOM_S6_ACC_REQ_DFP1I              ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X              ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
+#define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
+
+#define TMDS1XEncoderControl               DVOEncoderControl           
+#define DFP1XOutputControl                 DVOOutputControl
+
+#define ExternalDFPOutputControl           DFP1XOutputControl
+#define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
+
+#define DFP1IOutputControl                 TMDSAOutputControl
+#define DFP2IOutputControl                 LVTMAOutputControl      
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard  ucDacStandard
+#define ucDac2Standard  ucDacStandard  
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl   TMDSAOutputControl
+#define DFP2OutputControl   LVTMAOutputControl
+#define CRT1OutputControl   DAC1OutputControl
+#define CRT2OutputControl   DAC2OutputControl
+
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
+#define EnableLVDS_SS   EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3  ENABLE_SPREAD_SPECTRUM_ON_PPLL  
+
+//#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE	        ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_DEVICE_TV2_INDEX           0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT         (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S3_TV2_ACTIVE              ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE         ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE	        0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE          0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE         0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE         0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE         0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE          0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE         0x00800000L
+#define ATOM_S2_CV_DPMS_STATE           0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE					0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE					0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE					0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2       0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2       0x02
+#define ATOM_S2_TV1_DPMS_STATEb2        0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2       0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2       0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2       0x20
+#define ATOM_S2_TV2_DPMS_STATEb2        0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2       0x80
+#define ATOM_S2_CV_DPMS_STATEb3         0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3				0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3				0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3				0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3	0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3  0x80
+
+/*********************************************************************************/
+
+#pragma pack() // restore default alignment (the BIOS data above uses byte alignment)
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;      //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8];    //UINT64  OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32  Signature;       //0x0
+  UINT32  Length;          //0x4
+  UINT8   Revision;        //0x8
+  UINT8   Checksum;        //0x9
+  UINT8   OemId[6];        //0xA
+  UINT64  OemTableId;      //0x10
+  UINT32  OemRevision;     //0x18
+  UINT32  CreatorId;       //0x1C
+  UINT32  CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+  ULONG Reserved[4];      //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+  ULONG  PCIBus;          //0x4C
+  ULONG  PCIDevice;       //0x50
+  ULONG  PCIFunction;     //0x54
+  USHORT VendorID;        //0x58
+  USHORT DeviceID;        //0x5A
+  USHORT SSVID;           //0x5C
+  USHORT SSID;            //0x5E
+  ULONG  Revision;        //0x60
+  ULONG  ImageLength;     //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+  VFCT_IMAGE_HEADER	VbiosHeader;
+  UCHAR	VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+  VFCT_IMAGE_HEADER	Lib1Header;
+  UCHAR	Lib1Content[1];
+}GOP_LIB1_CONTENT;
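+
+// Illustrative sketch: the VFCT carries one or more VBIOS images, each
+// prefixed by a VFCT_IMAGE_HEADER; a lookup matches the PCI bus/device/
+// function fields and then uses the content that follows the header. Only
+// the first image is checked here; real code would also step through the
+// list using ImageLength. The helper name is an assumption.
+static inline UCHAR *vfct_find_vbios(UEFI_ACPI_VFCT *vfct, ULONG bus,
+				     ULONG dev, ULONG fn)
+{
+	GOP_VBIOS_CONTENT *c = (GOP_VBIOS_CONTENT *)
+		((UCHAR *)vfct + vfct->VBIOSImageOffset);
+
+	if (c->VbiosHeader.PCIBus == bus &&
+	    c->VbiosHeader.PCIDevice == dev &&
+	    c->VbiosHeader.PCIFunction == fn)
+		return c->VbiosContent;
+	return (UCHAR *)0;
+}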
+
+#pragma pack()
+
+
+#endif /* _ATOMBIOS_H */
+
+#include "pptable.h"
+
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 0000000..efbd581
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,2248 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
+static void atombios_overscan_setup(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+	int a1, a2;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_CENTER:
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		break;
+	case RMX_ASPECT:
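+		/* Compare the source and adjusted aspect ratios by
+		 * cross-multiplying so no division is needed: a1 > a2 means
+		 * the adjusted mode is wider (pad left/right), a2 > a1 means
+		 * it is taller (pad top/bottom).
+		 */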
+		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+		if (a1 > a2) {
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+		} else if (a2 > a1) {
+			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+		}
+		break;
+	case RMX_FULL:
+	default:
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
+		break;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_scaler_setup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	ENABLE_SCALER_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	/* fixme - fill in enc_priv for atom dac */
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+	bool is_tv = false, is_cv = false;
+
+	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+		return;
+
+	if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		tv_std = tv_dac->tv_std;
+		is_tv = true;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucScaler = radeon_crtc->crtc_id;
+
+	if (is_tv) {
+		switch (tv_std) {
+		case TV_STD_NTSC:
+		default:
+			args.ucTVStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.ucTVStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.ucTVStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.ucTVStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.ucTVStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.ucTVStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.ucTVStandard = ATOM_TV_PALCN;
+			break;
+		}
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else if (is_cv) {
+		args.ucTVStandard = ATOM_TV_CV;
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else {
+		switch (radeon_crtc->rmx_type) {
+		case RMX_FULL:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		case RMX_CENTER:
+			args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		case RMX_ASPECT:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.ucEnable = ATOM_SCALER_DISABLE;
+			else
+				args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		}
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	if ((is_tv || is_cv)
+	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
+		atom_rv515_force_tv_scaler(rdev, radeon_crtc);
+	}
+}
+
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index =
+	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static const u32 vga_control_regs[6] =
+{
+	AVIVO_D1VGA_CONTROL,
+	AVIVO_D2VGA_CONTROL,
+	EVERGREEN_D3VGA_CONTROL,
+	EVERGREEN_D4VGA_CONTROL,
+	EVERGREEN_D5VGA_CONTROL,
+	EVERGREEN_D6VGA_CONTROL,
+};
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+	BLANK_CRTC_PS_ALLOCATION args;
+	u32 vga_control = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE8(rdev)) {
+		vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
+		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
+	}
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucBlanking = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	if (ASIC_IS_DCE8(rdev)) {
+		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
+	}
+}
+
+static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucDispPipeId = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		atombios_enable_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+		atombios_blank_crtc(crtc, ATOM_DISABLE);
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_crtc_vblank_on(crtc);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_crtc_vblank_off(crtc);
+		if (radeon_crtc->enabled)
+			atombios_blank_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+		atombios_enable_crtc(crtc, ATOM_DISABLE);
+		radeon_crtc->enabled = false;
+		break;
+	}
+	/* adjust pm to dpms */
+	radeon_pm_compute_clocks(rdev);
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
+	args.usH_Blanking_Time =
+		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
+	args.usV_Blanking_Time =
+		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
+	args.usH_SyncOffset =
+		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_SyncOffset =
+		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+	args.ucH_Border = radeon_crtc->h_border;
+	args.ucV_Border = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+				     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+	args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+	args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+	args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+	args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+	args.ucOverscanRight = radeon_crtc->h_border;
+	args.ucOverscanLeft = radeon_crtc->h_border;
+	args.ucOverscanBottom = radeon_crtc->v_border;
+	args.ucOverscanTop = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+{
+	u32 ss_cntl;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	}
+}
+
+
+union atom_enable_ss {
+	ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+};
+
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
+				     int enable,
+				     int pll_id,
+				     int crtc_id,
+				     struct radeon_atom_ss *ss)
+{
+	unsigned i;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+	union atom_enable_ss args;
+
+	if (enable) {
+		/* Don't mess with SS if the percentage is 0 or the SS is
+		 * external.  SS was already disabled earlier, and disabling
+		 * it again can cause display problems if the pll is already
+		 * programmed.
+		 */
+		if (ss->percentage == 0)
+			return;
+		if (ss->type & ATOM_EXTERNAL_SS_MASK)
+			return;
+	} else {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->mode_info.crtcs[i] &&
+			    rdev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+				/* another crtc is using this pll; don't turn
+				 * off spread spectrum, as that might disturb
+				 * the display on the active crtc
+				 */
+				return;
+			}
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE5(rdev)) {
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+		args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v3.ucEnable = enable;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v2.ucEnable = enable;
+	} else if (ASIC_IS_DCE3(rdev)) {
+		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.v1.ucSpreadSpectrumStep = ss->step;
+		args.v1.ucSpreadSpectrumDelay = ss->delay;
+		args.v1.ucSpreadSpectrumRange = ss->range;
+		args.v1.ucPpll = pll_id;
+		args.v1.ucEnable = enable;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+		args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+		args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+		args.lvds_ss_2.ucEnable = enable;
+	} else {
+		if (enable == ATOM_DISABLE) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
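+		/* the legacy LVDS SS table packs both fields into one byte:
+		 * step size in bits [3:2] and delay in bits [6:4]
+		 */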
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+		args.lvds_ss.ucEnable = enable;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union adjust_pixel_clock {
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder = radeon_crtc->encoder;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	u32 adjusted_clock = mode->clock;
+	int encoder_mode = atombios_get_encoder_mode(encoder);
+	u32 dp_clock = mode->clock;
+	u32 clock = mode->clock;
+	int bpc = radeon_crtc->bpc;
+	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
+
+	/* reset the pll flags */
+	radeon_crtc->pll_flags = 0;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+				RADEON_PLL_PREFER_CLOSEST_LOWER);
+
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+		if (rdev->family < CHIP_RV770)
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+		/* use frac fb div on APUs */
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		/* use frac fb div on RS780/RS880 */
+		if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+		    && !radeon_crtc->ss_enabled)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+	} else {
+		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
+
+		if (mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+	}
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		if (connector) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			struct radeon_connector_atom_dig *dig_connector =
+				radeon_connector->con_priv;
+
+			dp_clock = dig_connector->dp_clock;
+		}
+	}
+
+	if (radeon_encoder->is_mst_encoder) {
+		struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+		struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+	}
+
+	/* use recommended ref_div for ss */
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_crtc->ss_enabled) {
+			if (radeon_crtc->ss.refdiv) {
+				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+				if (ASIC_IS_AVIVO(rdev) &&
+				    rdev->family != CHIP_RS780 &&
+				    rdev->family != CHIP_RS880)
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+			}
+		}
+	}
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+			adjusted_clock = mode->clock * 2;
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+	} else {
+		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+			radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+	}
+
+	/* adjust pll for deep color modes */
+	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
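+		/* deep color packs more bits per pixel on the link, so the
+		 * pixel clock must be scaled up accordingly:
+		 * 30/24 = 5/4 for 10 bpc, 36/24 = 3/2 for 12 bpc, 48/24 = 2x for 16 bpc
+		 */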
+		switch (bpc) {
+		case 8:
+		default:
+			break;
+		case 10:
+			clock = (clock * 5) / 4;
+			break;
+		case 12:
+			clock = (clock * 3) / 2;
+			break;
+		case 16:
+			clock = clock * 2;
+			break;
+		}
+	}
+
+	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+	 * accordingly based on the encoder/transmitter to work around
+	 * special hw requirements.
+	 */
+	if (ASIC_IS_DCE3(rdev)) {
+		union adjust_pixel_clock args;
+		u8 frev, crev;
+		int index;
+
+		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+					   &crev))
+			return adjusted_clock;
+
+		memset(&args, 0, sizeof(args));
+
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 1:
+			case 2:
+				args.v1.usPixelClock = cpu_to_le16(clock / 10);
+				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v1.ucEncodeMode = encoder_mode;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v1.ucConfig |=
+						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+				break;
+			case 3:
+				args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
+				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v3.sInput.ucEncodeMode = encoder_mode;
+				args.v3.sInput.ucDispPllConfig = 0;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;
+				if (ENCODER_MODE_IS_DP(encoder_mode)) {
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+					/* DP link clock / 10: 16200 or 27000 */
+					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+				} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+					if (dig->coherent_mode)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_COHERENT_MODE;
+					if (is_duallink)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_DUAL_LINK;
+				}
+				if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+				    ENCODER_OBJECT_ID_NONE)
+					args.v3.sInput.ucExtTransmitterID =
+						radeon_encoder_get_dp_bridge_encoder_id(encoder);
+				else
+					args.v3.sInput.ucExtTransmitterID = 0;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+				if (args.v3.sOutput.ucRefDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+					radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
+				}
+				if (args.v3.sOutput.ucPostDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+					radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
+				}
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+				return adjusted_clock;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return adjusted_clock;
+		}
+	}
+	return adjusted_clock;
+}
+
+union set_pixel_clock {
+	SET_PIXEL_CLOCK_PS_ALLOCATION base;
+	PIXEL_CLOCK_PARAMETERS v1;
+	PIXEL_CLOCK_PARAMETERS_V2 v2;
+	PIXEL_CLOCK_PARAMETERS_V3 v3;
+	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
+};
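+
+/* The SetPixelClock parameter layout varies with the table revision
+ * reported by atom_parse_cmd_header(), so all known versions are overlaid
+ * in one union and the matching member is filled in by the callers below.
+ */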
+
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
+				    u32 dispclk)
+{
+	u8 frev, crev;
+	int index;
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 5:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v5.ucCRTC = ATOM_CRTC_INVALID;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
+			args.v5.ucPpll = ATOM_DCPLL;
+			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+			if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
+				args.v6.ucPpll = ATOM_EXT_PLL1;
+			else if (ASIC_IS_DCE6(rdev))
+				args.v6.ucPpll = ATOM_PPLL0;
+			else
+				args.v6.ucPpll = ATOM_DCPLL;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+				      u32 crtc_id,
+				      int pll_id,
+				      u32 encoder_mode,
+				      u32 encoder_id,
+				      u32 clock,
+				      u32 ref_div,
+				      u32 fb_div,
+				      u32 frac_fb_div,
+				      u32 post_div,
+				      int bpc,
+				      bool ss_enabled,
+				      struct radeon_atom_ss *ss)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 frev, crev;
+	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			if (clock == ATOM_DISABLE)
+				return;
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
+			args.v1.usRefDiv = cpu_to_le16(ref_div);
+			args.v1.usFbDiv = cpu_to_le16(fb_div);
+			args.v1.ucFracFbDiv = frac_fb_div;
+			args.v1.ucPostDiv = post_div;
+			args.v1.ucPpll = pll_id;
+			args.v1.ucCRTC = crtc_id;
+			args.v1.ucRefDivSrc = 1;
+			break;
+		case 2:
+			args.v2.usPixelClock = cpu_to_le16(clock / 10);
+			args.v2.usRefDiv = cpu_to_le16(ref_div);
+			args.v2.usFbDiv = cpu_to_le16(fb_div);
+			args.v2.ucFracFbDiv = frac_fb_div;
+			args.v2.ucPostDiv = post_div;
+			args.v2.ucPpll = pll_id;
+			args.v2.ucCRTC = crtc_id;
+			args.v2.ucRefDivSrc = 1;
+			break;
+		case 3:
+			args.v3.usPixelClock = cpu_to_le16(clock / 10);
+			args.v3.usRefDiv = cpu_to_le16(ref_div);
+			args.v3.usFbDiv = cpu_to_le16(fb_div);
+			args.v3.ucFracFbDiv = frac_fb_div;
+			args.v3.ucPostDiv = post_div;
+			args.v3.ucPpll = pll_id;
+			if (crtc_id == ATOM_CRTC2)
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+			else
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+			args.v3.ucTransmitterId = encoder_id;
+			args.v3.ucEncoderMode = encoder_mode;
+			break;
+		case 5:
+			args.v5.ucCRTC = crtc_id;
+			args.v5.usPixelClock = cpu_to_le16(clock / 10);
+			args.v5.ucRefDiv = ref_div;
+			args.v5.usFbDiv = cpu_to_le16(fb_div);
+			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v5.ucPostDiv = post_div;
+			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+					break;
+				case 12:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+					break;
+				}
+			}
+			args.v5.ucTransmitterID = encoder_id;
+			args.v5.ucEncoderMode = encoder_mode;
+			args.v5.ucPpll = pll_id;
+			break;
+		case 6:
+			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+					break;
+				case 12:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+					break;
+				case 16:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+					break;
+				}
+			}
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	radeon_crtc->bpc = 8;
+	radeon_crtc->ss_enabled = false;
+
+	if (radeon_encoder->is_mst_encoder) {
+		radeon_dp_mst_prepare_pll(crtc, mode);
+	} else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		struct drm_connector *connector =
+			radeon_get_connector_for_encoder(radeon_crtc->encoder);
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+		int dp_clock;
+
+		/* Assign mode clock for hdmi deep color max clock limit check */
+		radeon_connector->pixelclock_for_modeset = mode->clock;
+		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
+
+		switch (encoder_mode) {
+		case ATOM_ENCODER_MODE_DP_MST:
+		case ATOM_ENCODER_MODE_DP:
+			/* DP/eDP */
+			dp_clock = dig_connector->dp_clock / 10;
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_DP,
+									 dp_clock);
+			else {
+				if (dp_clock == 16200) {
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID2);
+					if (!radeon_crtc->ss_enabled)
+						radeon_crtc->ss_enabled =
+							radeon_atombios_get_ppll_ss_info(rdev,
+											 &radeon_crtc->ss,
+											 ATOM_DP_SS_ID1);
+				} else {
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID1);
+				}
+				/* disable spread spectrum on DCE3 DP */
+				radeon_crtc->ss_enabled = false;
+			}
+			break;
+		case ATOM_ENCODER_MODE_LVDS:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id,
+									 mode->clock / 10);
+			else
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_ppll_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id);
+			break;
+		case ATOM_ENCODER_MODE_DVI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_TMDS,
+									 mode->clock / 10);
+			break;
+		case ATOM_ENCODER_MODE_HDMI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_HDMI,
+									 mode->clock / 10);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* adjust pixel clock as needed */
+	radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+	return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_clock = mode->clock;
+	u32 clock = mode->clock;
+	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+	struct radeon_pll *pll;
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	/* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
+	if (ASIC_IS_DCE5(rdev) &&
+	    (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
+	    (radeon_crtc->bpc > 8))
+		clock = radeon_crtc->adjusted_clock;
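+	/* (for bpc > 8 the adjusted clock already includes the 5/4, 3/2 or
+	 *  2x deep color scaling applied in atombios_adjust_pll())
+	 */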
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+		pll = &rdev->clock.p1pll;
+		break;
+	case ATOM_PPLL2:
+		pll = &rdev->clock.p2pll;
+		break;
+	case ATOM_DCPLL:
+	case ATOM_PPLL_INVALID:
+	default:
+		pll = &rdev->clock.dcpll;
+		break;
+	}
+
+	/* update pll params */
+	pll->flags = radeon_crtc->pll_flags;
+	pll->reference_div = radeon_crtc->pll_reference_div;
+	pll->post_div = radeon_crtc->pll_post_div;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		/* TV seems to prefer the legacy algo on some boards */
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else if (ASIC_IS_AVIVO(rdev))
+		radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					 &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+				 radeon_crtc->crtc_id, &radeon_crtc->ss);
+
+	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+				  encoder_mode, radeon_encoder->encoder_id, clock,
+				  ref_div, fb_div, frac_fb_div, post_div,
+				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
+
+	if (radeon_crtc->ss_enabled) {
+		/* calculate ss amount and step size */
+		if (ASIC_IS_DCE4(rdev)) {
+			u32 step_size;
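+			/* amount is the fb divider deviation in units of
+			 * 0.1 * fb_div: (fb_div * 10 + frac_fb_div) scaled by
+			 * the ss percentage.  The whole fb_div part goes into
+			 * the FBDIV field, the remaining tenths into NFRAC.
+			 */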
+			u32 amount = (((fb_div * 10) + frac_fb_div) *
+				      (u32)radeon_crtc->ss.percentage) /
+				(100 * (u32)radeon_crtc->ss.percentage_divider);
+			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+			radeon_crtc->ss.amount |= ((amount - (amount / 10) * 10) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+				step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			else
+				step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			radeon_crtc->ss.step = step_size;
+		}
+
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+					 radeon_crtc->crtc_id, &radeon_crtc->ss);
+	}
+}
+
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	unsigned bankw, bankh, mtaspect, tile_split;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 tmp, viewport_w, viewport_h;
+	int r;
+	bool bypass_lut = false;
+	struct drm_format_name_buf format_name;
+
+	/* no fb bound */
+	if (!atomic && !crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic)
+		target_fb = fb;
+	else
+		target_fb = crtc->primary->fb;
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	obj = target_fb->obj[0];
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->format->format) {
+	case DRM_FORMAT_C8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_BGRA5551:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_RGB565:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_BGRA1010102:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+		          drm_get_format_name(target_fb->format->format, &format_name));
+		return -EINVAL;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+
+		/* Set NUM_BANKS. */
+		if (rdev->family >= CHIP_TAHITI) {
+			unsigned index, num_banks;
+
+			if (rdev->family >= CHIP_BONAIRE) {
+				unsigned tileb, tile_split_bytes;
+
+				/* Calculate the macrotile mode index. */
+				tile_split_bytes = 64 << tile_split;
+				tileb = 8 * 8 * target_fb->format->cpp[0];
+				tileb = min(tile_split_bytes, tileb);
+
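+				/* index = log2(tileb / 64), the position of
+				 * this tile size in the macrotile mode array
+				 */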
+				for (index = 0; tileb > 64; index++)
+					tileb >>= 1;
+
+				if (index >= 16) {
+					DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+						  target_fb->format->cpp[0] * 8,
+						  tile_split);
+					return -EINVAL;
+				}
+
+				num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+			} else {
+				switch (target_fb->format->cpp[0] * 8) {
+				case 8:
+					index = 10;
+					break;
+				case 16:
+					index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+					break;
+				default:
+				case 32:
+					index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+					break;
+				}
+
+				num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+			}
+
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+		} else {
+			/* NI and older. */
+			if (rdev->family >= CHIP_CAYMAN)
+				tmp = rdev->config.cayman.tile_config;
+			else
+				tmp = rdev->config.evergreen.tile_config;
+
+			switch ((tmp & 0xf0) >> 4) {
+			case 0: /* 4 banks */
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+				break;
+			case 1: /* 8 banks */
+			default:
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+				break;
+			case 2: /* 16 banks */
+				fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+				break;
+			}
+		}
+
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+		if (rdev->family >= CHIP_BONAIRE) {
+			/* XXX need to know more about the surface tiling mode */
+			fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
+		}
+	} else if (tiling_flags & RADEON_TILING_MICRO)
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+	if (rdev->family >= CHIP_BONAIRE) {
+		/* Read the pipe config from the 2D TILED SCANOUT mode.
+		 * It should be the same for the other modes too, but not all
+		 * modes set the pipe config field. */
+		u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+		fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
+	} else if ((rdev->family == CHIP_TAHITI) ||
+		   (rdev->family == CHIP_PITCAIRN))
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+	else if ((rdev->family == CHIP_VERDE) ||
+		 (rdev->family == CHIP_OLAND) ||
+		 (rdev->family == CHIP_HAINAN)) /* for completeness.  HAINAN has no display hw */
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+		break;
+	case 1:
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+		break;
+	case 2:
+		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+		break;
+	case 3:
+		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+		break;
+	case 4:
+		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+		break;
+	case 5:
+		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+		break;
+	default:
+		break;
+	}
+
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	/*
+	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+	 * retain the full precision throughout the pipeline.
+	 */
+	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
+		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+	if (bypass_lut)
+		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
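+	/* DRM stores pitches[0] in bytes while GRPH_PITCH expects pixels,
+	 * hence the division by bytes-per-pixel
+	 */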
+	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
+	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	if (rdev->family >= CHIP_BONAIRE)
+		WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+		       target_fb->height);
+	else
+		WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+		       target_fb->height);
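+	/* align the viewport start: x to a multiple of 4 pixels, y to an
+	 * even line; the viewport height is likewise kept even (and doubled
+	 * for interlaced scanout on CIK parts below)
+	 */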
+	x &= ~3;
+	y &= ~1;
+	WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	if ((rdev->family >= CHIP_BONAIRE) &&
+	    (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
+		viewport_h *= 2;
+	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->primary->fb) {
+		rbo = gem_to_radeon_bo(fb->obj[0]);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct drm_framebuffer *target_fb;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
+	u32 viewport_w, viewport_h;
+	int r;
+	bool bypass_lut = false;
+	struct drm_format_name_buf format_name;
+
+	/* no fb bound */
+	if (!atomic && !crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic)
+		target_fb = fb;
+	else
+		target_fb = crtc->primary->fb;
+
+	obj = target_fb->obj[0];
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->format->format) {
+	case DRM_FORMAT_C8:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
+		    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
+		break;
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+		break;
+	case DRM_FORMAT_XRGB1555:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+		break;
+	case DRM_FORMAT_RGB565:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+		          drm_get_format_name(target_fb->format->format, &format_name));
+		return -EINVAL;
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
+		if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= AVIVO_D1GRPH_TILED;
+	}
+
+	if (radeon_crtc->crtc_id == 0)
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+	else
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	if (rdev->family >= CHIP_RV770) {
+		if (radeon_crtc->crtc_id) {
+			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		} else {
+			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		}
+	}
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+	       radeon_crtc->crtc_offset, (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	/* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
+	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
+		 (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
+
+	if (bypass_lut)
+		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
+	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       target_fb->height);
+	x &= ~3;
+	y &= ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+
+	if (!atomic && fb && fb != crtc->primary->fb) {
+		rbo = gem_to_radeon_bo(fb->obj[0]);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			   struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else
+		return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, enum mode_set_atomic state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+	else
+		return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	u32 disp_merge_cntl;
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		break;
+	case 1:
+		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+		break;
+	}
+}
+
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << test_radeon_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
+				continue;
+			/* for DP use the same PLL for all */
+			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ * @encoder: drm encoder
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 adjusted_clock, test_adjusted_clock;
+
+	adjusted_clock = radeon_crtc->adjusted_clock;
+
+	if (adjusted_clock == 0)
+		return ATOM_PPLL_INVALID;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
+				continue;
+			/* check if we are already driving this connector with another crtc */
+			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+				/* if we are, return that pll */
+				if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+					return test_radeon_crtc->pll_id;
+			}
+			/* for non-DP check the clock */
+			test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+			    (adjusted_clock == test_adjusted_clock) &&
+			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 8.x
+ * KB/KV
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
+ * CI
+ * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_in_use;
+	int pll;
+
+	if (ASIC_IS_DCE8(rdev)) {
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* otherwise, pick one of the plls */
+		if ((rdev->family == CHIP_KABINI) ||
+		    (rdev->family == CHIP_MULLINS)) {
+			/* KB/ML has PPLL1 and PPLL2 */
+			pll_in_use = radeon_get_pll_use_mask(crtc);
+			if (!(pll_in_use & (1 << ATOM_PPLL2)))
+				return ATOM_PPLL2;
+			if (!(pll_in_use & (1 << ATOM_PPLL1)))
+				return ATOM_PPLL1;
+			DRM_ERROR("unable to allocate a PPLL\n");
+			return ATOM_PPLL_INVALID;
+		} else {
+			/* CI/KV has PPLL0, PPLL1, and PPLL2 */
+			pll_in_use = radeon_get_pll_use_mask(crtc);
+			if (!(pll_in_use & (1 << ATOM_PPLL2)))
+				return ATOM_PPLL2;
+			if (!(pll_in_use & (1 << ATOM_PPLL1)))
+				return ATOM_PPLL1;
+			if (!(pll_in_use & (1 << ATOM_PPLL0)))
+				return ATOM_PPLL0;
+			DRM_ERROR("unable to allocate a PPLL\n");
+			return ATOM_PPLL_INVALID;
+		}
+	} else if (ASIC_IS_DCE61(rdev)) {
+		struct radeon_encoder_atom_dig *dig =
+			radeon_encoder->enc_priv;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+		    (dig->linkb == false))
+			/* UNIPHY A uses PPLL2 */
+			return ATOM_PPLL2;
+		else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			/* UNIPHY B/C/D/E/F */
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* UNIPHY B/C/D/E/F */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL0)))
+			return ATOM_PPLL0;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else if (ASIC_IS_DCE41(rdev)) {
+		/* Don't share PLLs on DCE4.1 chips */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+		}
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+		 * depending on the asic:
+		 * DCE4: PPLL or ext clock
+		 * DCE5: PPLL, DCPLL, or ext clock
+		 * DCE6: PPLL, PPLL0, or ext clock
+		 *
+		 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+		 * PPLL/DCPLL programming and only program the DP DTO for the
+		 * crtc virtual pixel clock.
+		 */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else if (ASIC_IS_DCE6(rdev))
+				/* use PPLL0 for all DP */
+				return ATOM_PPLL0;
+			else if (ASIC_IS_DCE5(rdev))
+				/* use DCPLL for all DP */
+				return ATOM_DCPLL;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* all other cases */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else {
+	/* on pre-R5xx asics, the crtc-to-pll mapping is hardcoded */
+	/* some atombios code (observed on some DCE2/DCE3 parts) has a
+	 * bug: the pll-to-crtc matching is done through
+	 * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses
+	 * the pll id (1 or 2) to select which register to write, i.e.
+	 * with pll1 it writes PCLK_CRTC1_CNTL (0x480) and with pll2 it
+	 * writes PCLK_CRTC2_CNTL (0x484), and then uses the crtc id to
+	 * choose which value to write -- the reverse of the register
+	 * logic.  The only cases that work are when the pll id is the
+	 * same as the crtc id, or when both pll and crtc are enabled
+	 * and use the same clock.
+	 *
+	 * So just return the crtc id as if crtc and pll were hard
+	 * linked together, even if they aren't.
+	 */
+		return radeon_crtc->crtc_id;
+	}
+}
+
+void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
+{
+	/* always set DCPLL */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+	else if (ASIC_IS_DCE4(rdev)) {
+		struct radeon_atom_ss ss;
+		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+								   ASIC_INTERNAL_SS_ON_DCPLL,
+								   rdev->clock.default_dispclk);
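+		/* temporarily disable spread spectrum while the display engine PLL is reprogrammed */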
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
+		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
+	}
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+			   struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode,
+			   int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	bool is_tvcv = false;
+
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+		is_tvcv = true;
+
+	if (!radeon_crtc->adjusted_clock)
+		return -EINVAL;
+
+	atombios_crtc_set_pll(crtc, adjusted_mode);
+
+	if (ASIC_IS_DCE4(rdev))
+		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	else if (ASIC_IS_AVIVO(rdev)) {
+		if (is_tvcv)
+			atombios_crtc_set_timing(crtc, adjusted_mode);
+		else
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	} else {
+		atombios_crtc_set_timing(crtc, adjusted_mode);
+		if (radeon_crtc->crtc_id == 0)
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+		radeon_legacy_atom_fixup(crtc);
+	}
+	atombios_crtc_set_base(crtc, x, y, old_fb);
+	atombios_overscan_setup(crtc, mode, adjusted_mode);
+	atombios_scaler_setup(crtc);
+	radeon_cursor_reset(crtc);
+	/* store the hw mode for dpm */
+	radeon_crtc->hw_mode = *adjusted_mode;
+
+	return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			radeon_crtc->encoder = encoder;
+			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+		radeon_crtc->encoder = NULL;
+		radeon_crtc->connector = NULL;
+		return false;
+	}
+	if (radeon_crtc->encoder) {
+		struct radeon_encoder *radeon_encoder =
+			to_radeon_encoder(radeon_crtc->encoder);
+
+		radeon_crtc->output_csc = radeon_encoder->output_csc;
+	}
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+		return false;
+
+	return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* disable crtc pair power gating before programming */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
+	atombios_lock_crtc(crtc, ATOM_ENABLE);
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	atombios_lock_crtc(crtc, ATOM_DISABLE);
+}
+
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_atom_ss ss;
+	int i;
+
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct radeon_bo *rbo;
+
+		rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
+	}
+	/* disable the GRPH */
+	if (ASIC_IS_DCE4(rdev))
+		WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+	else if (ASIC_IS_AVIVO(rdev))
+		WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i] &&
+		    rdev->mode_info.crtcs[i]->enabled &&
+		    i != radeon_crtc->crtc_id &&
+		    radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is still using this pll;
+			 * don't turn it off
+			 */
+			goto done;
+		}
+	}
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+	case ATOM_PPLL2:
+		/* disable the ppll */
+		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	case ATOM_PPLL0:
+		/* disable the ppll */
+		if ((rdev->family == CHIP_ARUBA) ||
+		    (rdev->family == CHIP_KAVERI) ||
+		    (rdev->family == CHIP_BONAIRE) ||
+		    (rdev->family == CHIP_HAWAII))
+			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	default:
+		break;
+	}
+done:
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+	.dpms = atombios_crtc_dpms,
+	.mode_fixup = atombios_crtc_mode_fixup,
+	.mode_set = atombios_crtc_mode_set,
+	.mode_set_base = atombios_crtc_set_base,
+	.mode_set_base_atomic = atombios_crtc_set_base_atomic,
+	.prepare = atombios_crtc_prepare,
+	.commit = atombios_crtc_commit,
+	.disable = atombios_crtc_disable,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+		default:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			break;
+		case 1:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			break;
+		case 2:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+			break;
+		case 3:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+			break;
+		case 4:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+			break;
+		case 5:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+			break;
+		}
+	} else {
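+		/* on AVIVO parts the CRTC1 register block sits at a fixed offset from CRTC0's */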
+		if (radeon_crtc->crtc_id == 1)
+			radeon_crtc->crtc_offset =
+				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+		else
+			radeon_crtc->crtc_offset = 0;
+	}
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 0000000..3e79859
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,861 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include <drm/drm_dp_helper.h>
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
+
+static char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
+ */
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+	u32 src_tmp[5], dst_tmp[5];
+	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
+
+	if (to_le) {
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
+	} else {
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
+	}
+#else
+	memcpy(dst, src, num_bytes);
+#endif
+}
+
+union aux_channel_transaction {
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
+
+static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+				 u8 *send, int send_bytes,
+				 u8 *recv, int recv_size,
+				 u8 delay, u8 *ack)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union aux_channel_transaction args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+	int recv_bytes;
+	int r = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	mutex_lock(&chan->mutex);
+	mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
+
+	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
+
+	radeon_atom_copy_swap(base, send, send_bytes, true);
+
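+	/* scratch layout: the AUX request is written at byte offset 4, the reply payload is read back at offset 20 */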
+	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
+	args.v1.ucDataOutLen = 0;
+	args.v1.ucChannelID = chan->rec.i2c_id;
+	args.v1.ucDelay = delay / 10;
+	if (ASIC_IS_DCE4(rdev))
+		args.v2.ucHPD_ID = chan->rec.hpd;
+
+	atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*ack = args.v1.ucReplyStatus;
+
+	/* timeout */
+	if (args.v1.ucReplyStatus == 1) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
+		r = -ETIMEDOUT;
+		goto done;
+	}
+
+	/* flags not zero */
+	if (args.v1.ucReplyStatus == 2) {
+		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+		r = -EIO;
+		goto done;
+	}
+
+	/* error */
+	if (args.v1.ucReplyStatus == 3) {
+		DRM_DEBUG_KMS("dp_aux_ch error\n");
+		r = -EIO;
+		goto done;
+	}
+
+	recv_bytes = args.v1.ucDataOutLen;
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	if (recv && recv_size)
+		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
+
+	r = recv_bytes;
+done:
+	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
+	mutex_unlock(&chan->mutex);
+
+	return r;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static ssize_t
+radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct radeon_i2c_chan *chan =
+		container_of(aux, struct radeon_i2c_chan, aux);
+	int ret;
+	u8 tx_buf[20];
+	size_t tx_size;
+	u8 ack, delay = 0;
+
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
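+	/* build the 4-byte AUX header: 20-bit address, 4-bit request type, payload length - 1 */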
+	tx_buf[0] = msg->address & 0xff;
+	tx_buf[1] = (msg->address >> 8) & 0xff;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
+	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+		/* The atom implementation only supports writes with a max payload of
+		 * 12 bytes since it uses 4 bits for the total count (header + payload)
+		 * in the parameter space.  The atom interface supports 16 byte
+		 * payloads for reads. The hw itself supports up to 16 bytes of payload.
+		 */
+		if (WARN_ON_ONCE(msg->size > 12))
+			return -E2BIG;
+		/* tx_size needs to be 4 even for bare address packets since the atom
+		 * table needs the info in tx_buf[3].
+		 */
+		tx_size = HEADER_SIZE + msg->size;
+		if (msg->size == 0)
+			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
+		else
+			tx_buf[3] |= tx_size << 4;
+		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
+		ret = radeon_process_aux_ch(chan,
+					    tx_buf, tx_size, NULL, 0, delay, &ack);
+		if (ret >= 0)
+			/* Return payload size. */
+			ret = msg->size;
+		break;
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		/* tx_size needs to be 4 even for bare address packets since the atom
+		 * table needs the info in tx_buf[3].
+		 */
+		tx_size = HEADER_SIZE;
+		if (msg->size == 0)
+			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
+		else
+			tx_buf[3] |= tx_size << 4;
+		ret = radeon_process_aux_ch(chan,
+					    tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0)
+		msg->reply = ack >> 4;
+
+	return ret;
+}
+
+void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
+{
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret;
+
+	radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
+	radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+	if (ASIC_IS_DCE5(rdev)) {
+		if (radeon_auxch)
+			radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
+		else
+			radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+	} else {
+		radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+	}
+
+	ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
+	if (!ret)
+		radeon_connector->ddc_bus->has_aux = true;
+
+	WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
+}
+
+/***** general DP utility functions *****/
+
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
+#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3
+
+static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
+				int lane_count,
+				u8 train_set[4])
+{
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
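+	/* use the highest voltage swing and pre-emphasis requested on any lane */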
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v |= DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= DP_PRE_EMPHASIS_MAX)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
+
+/* convert bits per color (bpc, from the EDID) to bits per pixel;
+ * an unknown bpc of 0 defaults to 24 bpp
+ */
+static int convert_bpc_to_bpp(int bpc)
+{
+	if (bpc == 0)
+		return 24;
+	else
+		return bpc * 3;
+}
+
+/***** radeon specific DP functions *****/
+
+static int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+					const u8 dpcd[DP_DPCD_SIZE],
+					unsigned pix_clock,
+					unsigned *dp_lanes, unsigned *dp_rate)
+{
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+	unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+	unsigned lane_num, i, max_pix_clock;
+
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
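+		/* Nutmeg DP-to-VGA bridges train at a fixed 2.70 GHz link rate, so only vary the lane count */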
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
+			if (max_pix_clock >= pix_clock) {
+				*dp_lanes = lane_num;
+				*dp_rate = 270000;
+				return 0;
+			}
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+				if (max_pix_clock >= pix_clock) {
+					*dp_lanes = lane_num;
+					*dp_rate = link_rates[i];
+					return 0;
+				}
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+				    int action, int dp_clock,
+				    u8 ucconfig, u8 lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					 radeon_connector->ddc_bus->rec.i2c_id, 0);
+}
+
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 buf[3];
+
+	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[DP_DPCD_SIZE];
+	int ret;
+
+	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+			       DP_DPCD_SIZE);
+	if (ret == DP_DPCD_SIZE) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
+
+		radeon_dp_probe_oui(radeon_connector);
+
+		return true;
+	}
+
+	dig_connector->dpcd[0] = 0;
+	return false;
+}
+
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+	u8 tmp;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return panel_mode;
+
+	if (!radeon_connector->con_priv)
+		return panel_mode;
+
+	dig_connector = radeon_connector->con_priv;
+
+	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+		/* DP bridge chips */
+		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
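+			/* bit 0 of DP_EDP_CONFIGURATION_CAP: panel supports the alternate scrambler reset */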
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+			else
+				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+		}
+	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		/* eDP */
+		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		}
+	}
+
+	return panel_mode;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+			       const struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int ret;
+
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+		ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+						   mode->clock,
+						   &dig_connector->dp_lane_count,
+						   &dig_connector->dp_clock);
+		if (ret) {
+			dig_connector->dp_clock = 0;
+			dig_connector->dp_lane_count = 0;
+		}
+	}
+}
+
+int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	unsigned dp_clock, dp_lanes;
+	int ret;
+
+	if ((mode->clock > 340000) &&
+	    (!radeon_connector_is_dp12_capable(connector)))
+		return MODE_CLOCK_HIGH;
+
+	if (!radeon_connector->con_priv)
+		return MODE_CLOCK_HIGH;
+	dig_connector = radeon_connector->con_priv;
+
+	ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+					   mode->clock,
+					   &dp_lanes,
+					   &dp_clock);
+	if (ret)
+		return MODE_CLOCK_HIGH;
+
+	if ((dp_clock == 540000) &&
+	    (!radeon_connector_is_dp12_capable(connector)))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+	if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
+	    <= 0)
+		return false;
+	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
+		return false;
+	return true;
+}
+
+void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+				  u8 power_state)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+
+	if (!radeon_connector->con_priv)
+		return;
+
+	dig_connector = radeon_connector->con_priv;
+
+	/* power up/down the sink */
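+	/* DP_SET_POWER is only defined for DPCD 1.1 and newer */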
+	if (dig_connector->dpcd[0] >= 0x11) {
+		drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux,
+				   DP_SET_POWER, power_state);
+		usleep_range(1000, 2000);
+	}
+}
+
+struct radeon_dp_link_train_info {
+	struct radeon_device *rdev;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int enc_id;
+	int dp_clock;
+	int dp_lane_count;
+	bool tp3_supported;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 train_set[4];
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 tries;
+	bool use_dpencoder;
+	struct drm_dp_aux *aux;
+};
+
+static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
+{
+	/* set the initial vs/emph on the source */
+	atombios_dig_transmitter_setup(dp_info->encoder,
+				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+				       0, dp_info->train_set[0]); /* sets all lanes at once */
+
+	/* set the vs/emph on the sink */
+	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
+			  dp_info->train_set, dp_info->dp_lane_count);
+}
+
+static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
+{
+	int rtp = 0;
+
+	/* set training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+			break;
+		}
+		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
+	} else {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = 0;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = 1;
+			break;
+		}
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+					  dp_info->dp_clock, dp_info->enc_id, rtp);
+	}
+
+	/* enable training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
+}
+
+static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 tmp;
+
+	/* power up the sink */
+	radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
+
+	/* possibly enable downspread on the sink */
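+	/* DPCD MAX_DOWNSPREAD bit 0: the sink supports up to 0.5% downspread */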
+	if (dp_info->dpcd[3] & 0x1)
+		drm_dp_dpcd_writeb(dp_info->aux,
+				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+	else
+		drm_dp_dpcd_writeb(dp_info->aux,
+				   DP_DOWNSPREAD_CTRL, 0);
+
+	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
+		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
+
+	/* set the lane count on the sink */
+	tmp = dp_info->dp_lane_count;
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
+		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
+
+	/* set the link rate on the sink */
+	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
+	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
+
+	/* start training on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	/* disable the training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux,
+			   DP_TRAINING_PATTERN_SET,
+			   DP_TRAINING_PATTERN_DISABLE);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
+{
+	udelay(400);
+
+	/* disable the training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux,
+			   DP_TRAINING_PATTERN_SET,
+			   DP_TRAINING_PATTERN_DISABLE);
+
+	/* disable the training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+{
+	bool clock_recovery;
+	u8 voltage;
+	int i;
+
+	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+	memset(dp_info->train_set, 0, 4);
+	radeon_dp_update_vs_emph(dp_info);
+
+	udelay(400);
+
+	/* clock recovery loop */
+	clock_recovery = false;
+	dp_info->tries = 0;
+	voltage = 0xff;
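+	/* 0xff can't match any real swing value, so the first pass is never counted as a repeat */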
+	while (1) {
+		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+
+		if (drm_dp_dpcd_read_link_status(dp_info->aux,
+						 dp_info->link_status) <= 0) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dp_info->dp_lane_count; i++) {
+			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dp_info->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
+		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++dp_info->tries;
+			if (dp_info->tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			dp_info->tries = 0;
+
+		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+	}
+	if (!clock_recovery) {
+		DRM_ERROR("clock recovery failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+{
+	bool channel_eq;
+
+	if (dp_info->tp3_supported)
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
+	else
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
+
+	/* channel equalization loop */
+	dp_info->tries = 0;
+	channel_eq = false;
+	while (1) {
+		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+
+		if (drm_dp_dpcd_read_link_status(dp_info->aux,
+						 dp_info->link_status) <= 0) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times */
+		if (dp_info->tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+		dp_info->tries++;
+	}
+
+	if (!channel_eq) {
+		DRM_ERROR("channel eq failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+void radeon_dp_link_train(struct drm_encoder *encoder,
+			  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	struct radeon_dp_link_train_info dp_info;
+	int index;
+	u8 tmp, frev, crev;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+	dig = radeon_encoder->enc_priv;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+		return;
+
+	/* DPEncoderService newer than 1.1 can't properly program the
+	 * training pattern.  For such versions, use
+	 * DIGxEncoderControl (x == 1 | 2) instead.
+	 */
+	dp_info.use_dpencoder = true;
+	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+		if (crev > 1) {
+			dp_info.use_dpencoder = false;
+		}
+	}
+
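+	/* pack the DIG encoder and link selection into the legacy DPEncoderService config byte */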
+	dp_info.enc_id = 0;
+	if (dig->dig_encoder)
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+	if (dig->linkb)
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
+
+	if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+	    == 1) {
+		if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+			dp_info.tp3_supported = true;
+		else
+			dp_info.tp3_supported = false;
+	} else {
+		dp_info.tp3_supported = false;
+	}
+
+	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
+	dp_info.rdev = rdev;
+	dp_info.encoder = encoder;
+	dp_info.connector = connector;
+	dp_info.dp_lane_count = dig_connector->dp_lane_count;
+	dp_info.dp_clock = dig_connector->dp_clock;
+	dp_info.aux = &radeon_connector->ddc_bus->aux;
+
+	if (radeon_dp_link_train_init(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_cr(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_ce(&dp_info))
+		goto done;
+done:
+	if (radeon_dp_link_train_finish(&dp_info))
+		return;
+}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 0000000..e67ed38
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2850 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_audio.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include <linux/backlight.h>
+#include <linux/dmi.h>
+
+extern int atom_debug;
+
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+	u8 backlight_level;
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+			   ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+				       u8 backlight_level)
+{
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+			   ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return 0;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_encoder *encoder = &radeon_encoder->base;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dig *dig;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    radeon_encoder->enc_priv) {
+		dig = radeon_encoder->enc_priv;
+		dig->backlight_level = level;
+		radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+			if (dig->backlight_level == 0) {
+				args.ucAction = ATOM_LCD_BLOFF;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			} else {
+				args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			if (dig->backlight_level == 0)
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+	return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+	.get_brightness = radeon_atom_backlight_get_brightness,
+	.update_status	= radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	struct radeon_encoder_atom_dig *dig;
+	char bl_name[16];
+
+	/* Mac laptops with multiple GPUs use the gmux driver for backlight,
+	 * so don't register a backlight device
+	 */
+	if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (rdev->pdev->device == 0x6741))
+		return;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
+				       pdata, &radeon_atom_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	dig = radeon_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	/* Set a reasonable default here if the level is 0 otherwise
+	 * fbdev will attempt to turn the backlight on after console
+	 * unblanking and it will try and restore 0 which turns the backlight
+	 * off again.
+	 */
+	if (bd->props.brightness == 0)
+		bd->props.brightness = RADEON_MAX_BL_LEVEL;
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon atom DIG backlight initialized\n");
+	rdev->mode_info.bl_encoder = radeon_encoder;
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct radeon_legacy_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *encoder,
+				struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode);
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* hw bug */
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+	/* vertical FP must be at least 1 */
+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+		adjusted_mode->crtc_vsync_start++;
+
+	/* get the native mode for scaling */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+	} else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		if (tv_dac) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M)
+				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+			else
+				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+		}
+	} else if (radeon_encoder->rmx_type != RMX_OFF) {
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+	}
+
+	if (ASIC_IS_DCE3(rdev) &&
+	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	     (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, adjusted_mode);
+	}
+
+	return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+		break;
+	}
+
+	args.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_PS2;
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_PAL:
+		case TV_STD_PAL_M:
+		case TV_STD_SCART_PAL:
+		case TV_STD_SECAM:
+		case TV_STD_PAL_CN:
+			args.ucDacStandard = ATOM_DAC1_PAL;
+			break;
+		case TV_STD_NTSC:
+		case TV_STD_NTSC_J:
+		case TV_STD_PAL_60:
+		default:
+			args.ucDacStandard = ATOM_DAC1_NTSC;
+			break;
+		}
+	}
+	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	TV_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+	args.sTVEncoder.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_NTSC:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+			break;
+		default:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		}
+	}
+
+	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+	int bpc = 8;
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		bpc = radeon_crtc->bpc;
+	}
+
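+	/* map the CRTC depth to the AtomBIOS PANEL_*BIT_PER_COLOR encoding;
+	 * unknown depths fall back to 8 bpc via the default label
+	 */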
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+union dvo_encoder_control {
+	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	union dvo_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+	uint8_t frev, crev;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* some R4xx chips have the wrong frev */
+	if (rdev->family <= CHIP_RV410)
+		frev = 1;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			/* R4xx, R5xx */
+			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			break;
+		case 2:
+			/* RS600/690/740 */
+			args.dvo.sDVOEncoder.ucAction = action;
+			args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			/* DFP1, CRT1, TV1 depending on the type of port */
+			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+			break;
+		case 3:
+			/* R6xx */
+			args.dvo_v3.ucAction = action;
+			args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.dvo_v3.ucDVOConfig = 0; /* XXX */
+			break;
+		case 4:
+			/* DCE8 */
+			args.dvo_v4.ucAction = action;
+			args.dvo_v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.dvo_v4.ucDVOConfig = 0; /* XXX */
+			args.dvo_v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union lvds_encoder_control {
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	union lvds_encoder_control args;
+	int index = 0;
+	int hdmi_detected = 0;
+	uint8_t frev, crev;
+
+	if (!dig)
+		return;
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+		hdmi_detected = 1;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+	case 2:
+		switch (crev) {
+		case 1:
+			args.v1.ucMisc = 0;
+			args.v1.ucAction = action;
+			if (hdmi_detected)
+				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			} else {
+				if (dig->linkb)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				/*if (pScrn->rgbBits == 8) */
+				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			}
+			break;
+		case 2:
+		case 3:
+			args.v2.ucMisc = 0;
+			args.v2.ucAction = action;
+			if (crev == 3) {
+				if (dig->coherent_mode)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+			}
+			if (hdmi_detected)
+				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v2.ucTruncate = 0;
+			args.v2.ucSpatial = 0;
+			args.v2.ucTemporal = 0;
+			args.v2.ucFRC = 0;
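+			/* the dither setup below appears to come from the
+			 * lcd_misc flags parsed out of the panel info table
+			 */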
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+				}
+				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+				}
+			} else {
+				if (dig->linkb)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	struct radeon_encoder_atom_dig *dig_enc;
+
+	if (radeon_encoder_is_digital(encoder)) {
+		dig_enc = radeon_encoder->enc_priv;
+		if (dig_enc->active_mst_links)
+			return ATOM_ENCODER_MODE_DP_MST;
+	}
+	if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
+		return ATOM_ENCODER_MODE_DP_MST;
+	/* dp bridges are always DP */
+	if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+		return ATOM_ENCODER_MODE_DP;
+
+	/* DVO is always DVO */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+		return ATOM_ENCODER_MODE_DVO;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+		if (radeon_audio != 0) {
+			if (radeon_connector->use_digital &&
+			    (radeon_connector->audio == RADEON_AUDIO_ENABLE))
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
+				 (radeon_connector->audio == RADEON_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (radeon_connector->use_digital)
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_CRT;
+		} else if (radeon_connector->use_digital) {
+			return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_CRT;
+		}
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	default:
+		if (radeon_audio != 0) {
+			if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
+				 (radeon_connector->audio == RADEON_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else
+				return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_DVI;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+			if (radeon_audio != 0 &&
+			    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+			    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+				return ATOM_ENCODER_MODE_DP_AUDIO;
+			return ATOM_ENCODER_MODE_DP;
+		} else if (radeon_audio != 0) {
+			if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
+				 (radeon_connector->audio == RADEON_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else
+				return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_DVI;
+		}
+	case DRM_MODE_CONNECTOR_eDP:
+		if (radeon_audio != 0 &&
+		    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+		    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+			return ATOM_ENCODER_MODE_DP_AUDIO;
+		return ATOM_ENCODER_MODE_DP;
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
+		return ATOM_ENCODER_MODE_CRT;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		/* fix me */
+		return ATOM_ENCODER_MODE_TV;
+		/*return ATOM_ENCODER_MODE_CV;*/
+	}
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0/6.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * llano
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ * ontario
+ * DIG1 drives UNIPHY0/1/2 link A
+ * DIG2 drives UNIPHY0/1/2 link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
+
+union dig_encoder_control {
+	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	union dig_encoder_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE4(rdev))
+		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+	else {
+		if (dig->dig_encoder)
+			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
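+			/* SETUP_PANEL_MODE appears to be issued only with the
+			 * newer (crev >= 3) tables; the v3 write below lands
+			 * in the same union storage as v1
+			 */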
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v1.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.ucLaneNum = 8;
+			else
+				args.v1.ucLaneNum = 4;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+				break;
+			}
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
+			break;
+		case 2:
+		case 3:
+			args.v3.ucAction = action;
+			args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
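+			/* ucConfig sits at the same offset in every version of
+			 * the union, so the v1 write below also sets the v3
+			 * config bits
+			 */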
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+			if (enc_override != -1)
+				args.v3.acConfig.ucDigSel = enc_override;
+			else
+				args.v3.acConfig.ucDigSel = dig->dig_encoder;
+			args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v4.ucPanelMode = panel_mode;
+			else
+				args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
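+			/* as above, the v1.ucConfig writes alias the v4 config byte */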
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
+				if (dp_clock == 540000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+				else if (dp_clock == 324000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
+				else if (dp_clock == 270000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+				else
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
+			}
+
+			if (enc_override != -1)
+				args.v4.acConfig.ucDigSel = enc_override;
+			else
+				args.v4.acConfig.ucDigSel = dig->dig_encoder;
+			args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
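+			/* the table encodes "no HPD pin" as 0, so valid ids
+			 * are shifted up by one
+			 */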
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v4.ucHPD_ID = 0;
+			else
+				args.v4.ucHPD_ID = hpd_id + 1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+	atombios_dig_encoder_setup2(encoder, action, panel_mode, -1);
+}
+
+union dig_transmitter_control {
+	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+};
+
+void
+atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	union dig_transmitter_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	bool is_dp = false;
+	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
+	int dig_encoder = dig->dig_encoder;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+		connector = radeon_get_connector_for_encoder_init(encoder);
+		/* only needed to avoid bailing in the dig_encoder check below;
+		 * the encoder isn't actually used for init
+		 */
+		dig_encoder = 0;
+	} else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		hpd_id = radeon_connector->hpd.hpd;
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		igp_lane_info = dig_connector->igp_lane_info;
+	}
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		pll_id = radeon_crtc->pll_id;
+	}
+
+	/* no dig encoder assigned */
+	if (dig_encoder == -1)
+		return;
+
+	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+		is_dp = true;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v1.asMode.ucLaneSel = lane_num;
+				args.v1.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+			if (dig_encoder)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
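+			/* igp_lane_info is a bitmask of 4-lane groups wired to
+			 * this connector; a dual link (8 lanes) needs a pair
+			 * of adjacent groups
+			 */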
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+				if (is_dp ||
+				    !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
+					if (igp_lane_info & 0x1)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+					else if (igp_lane_info & 0x2)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+					else if (igp_lane_info & 0x4)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+					else if (igp_lane_info & 0x8)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+				} else {
+					if (igp_lane_info & 0x3)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+					else if (igp_lane_info & 0xc)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+				}
+			}
+
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+			if (is_dp)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+			}
+			break;
+		case 2:
+			args.v2.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v2.asMode.ucLaneSel = lane_num;
+				args.v2.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v2.acConfig.ucEncoderSel = dig_encoder;
+			if (dig->linkb)
+				args.v2.acConfig.ucLinkSel = 1;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v2.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v2.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v2.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp) {
+				args.v2.acConfig.fCoherentMode = 1;
+				args.v2.acConfig.fDPConnector = 1;
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v2.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 3:
+			args.v3.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v3.asMode.ucLaneSel = lane_num;
+				args.v3.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v3.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE4, if there is an external clock, it generates the DP ref clock */
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v3.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v3.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v3.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v3.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v4.asMode.ucLaneSel = lane_num;
+				args.v4.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v4.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v4.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE5 DCPLL usually generates the DP ref clock */
+			if (is_dp) {
+				if (rdev->clock.dp_extclk)
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+				else
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+			} else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v4.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v4.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v4.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v4.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 5:
+			args.v5.ucAction = action;
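+			/* v5 takes the symbol clock in 10 kHz units: the DP
+			 * link rate for DP, otherwise the pixel clock
+			 */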
+			if (is_dp)
+				args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
+			else
+				args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+				args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
+				break;
+			}
+			if (is_dp)
+				args.v5.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v5.ucLaneNum = 8;
+			else
+				args.v5.ucLaneNum = 4;
+			args.v5.ucConnObjId = connector_object_id;
+			args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
+
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
+			else
+				args.v5.asConfig.ucPhyClkSrcId = pll_id;
+
+			if (is_dp)
+				args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v5.asConfig.ucCoherentMode = 1;
+			}
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v5.asConfig.ucHPDSel = 0;
+			else
+				args.v5.asConfig.ucHPDSel = hpd_id + 1;
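+			/* ucDigEncoderSel is a bitmask of DIG front ends,
+			 * hence the 1 << shift
+			 */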
+			args.v5.ucDigEncoderSel = (fe != -1) ? (1 << fe) : (1 << dig_encoder);
+			args.v5.ucDPLaneSet = lane_set;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+	atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1);
+}
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union dig_transmitter_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+	uint8_t frev, crev;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+		goto done;
+
+	if (!ASIC_IS_DCE4(rdev))
+		goto done;
+
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+		goto done;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		goto done;
+
+	memset(&args, 0, sizeof(args));
+
+	args.v1.ucAction = action;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* wait for the panel to power up */
+	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+		int i;
+
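+		/* poll HPD for up to ~300 ms, bailing out as soon as the
+		 * sink reports presence
+		 */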
+		for (i = 0; i < 300; i++) {
+			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+				return true;
+			mdelay(1);
+		}
+		return false;
+	}
+done:
+	return true;
+}
+
+union external_encoder_control {
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+				struct drm_encoder *ext_encoder,
+				int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+	union external_encoder_control args;
+	struct drm_connector *connector;
+	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+	u8 frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		/* no params on frev 1 */
+		break;
+	case 2:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.sDigEncoder.ucAction = action;
+			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.sDigEncoder.ucLaneNum = 8;
+			else
+				args.v1.sDigEncoder.ucLaneNum = 4;
+			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	ENABLE_YUV_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+	uint32_t temp, reg;
+
+	memset(&args, 0, sizeof(args));
+
+	if (rdev->family >= CHIP_R600)
+		reg = R600_BIOS_3_SCRATCH;
+	else
+		reg = RADEON_BIOS_3_SCRATCH;
+
+	/* XXX: fix up scratch reg handling */
+	temp = RREG32(reg);
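+	/* temporarily point the scratch reg at the active TV/CV device so
+	 * the table programs the right output; restored below
+	 */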
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+			     (radeon_crtc->crtc_id << 18)));
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+	else
+		WREG32(reg, 0);
+
+	if (enable)
+		args.ucEnable = ATOM_ENABLE;
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	WREG32(reg, temp);
+}
+
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+		break;
+	default:
+		return;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		args.ucAction = ATOM_ENABLE;
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (rdev->mode_info.bl_encoder) {
+				struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+				atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+			} else {
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		args.ucAction = ATOM_DISABLE;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLOFF;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_connector *radeon_connector = NULL;
+	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+	bool travis_quirk = false;
+
+	if (connector) {
+		radeon_connector = to_radeon_connector(connector);
+		radeon_dig_connector = radeon_connector->con_priv;
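+		/* Travis DP->LVDS bridges (pre-DCE5) must be kept in D0
+		 * until the transmitter is disabled; see the OFF path below
+		 */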
+		if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+		     ENCODER_OBJECT_ID_TRAVIS) &&
+		    (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+		    !ASIC_IS_DCE5(rdev))
+			travis_quirk = true;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			if (!connector)
+				dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+			else
+				dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+			atombios_dig_encoder_setup(encoder,
+						   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+						   dig->panel_mode);
+			if (ext_encoder) {
+				if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+					atombios_external_encoder_setup(encoder, ext_encoder,
+									EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+			}
+		} else if (ASIC_IS_DCE4(rdev)) {
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+		} else {
+			/* setup and enable the encoder and transmitter */
+			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+				radeon_dig_connector->edp_on = true;
+			}
+		}
+		/* enable the transmitter */
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			/* DP_SET_POWER_D0 is set in radeon_dp_link_train */
+			radeon_dp_link_train(encoder, connector);
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (rdev->mode_info.bl_encoder)
+				atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+			else
+				atombios_dig_transmitter_setup(encoder,
+							       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		}
+		if (ext_encoder)
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+
+		/* don't power off encoders with active MST links */
+		if (dig->active_mst_links)
+			return;
+
+		if (ASIC_IS_DCE4(rdev)) {
+			if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+		}
+		if (ext_encoder)
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) &&
+		    connector && !travis_quirk)
+			radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+		if (ASIC_IS_DCE4(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		} else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (travis_quirk)
+				radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+				radeon_dig_connector->edp_on = false;
+			}
+		}
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	int encoder_mode = atombios_get_encoder_mode(encoder);
+
+	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+		  radeon_encoder->active_device);
+
+	if ((radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     ENCODER_MODE_IS_DP(encoder_mode)))
+		radeon_audio_dpms(encoder, mode);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		radeon_atom_encoder_dpms_dig(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dvo_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dvo_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else if (ASIC_IS_DCE3(rdev))
+			radeon_atom_encoder_dpms_dig(encoder, mode);
+		else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dac_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dac_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	default:
+		return;
+	}
+
+	radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+union crtc_source_param {
+	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	union crtc_source_param args;
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *dig;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.v1.ucCRTC = radeon_crtc->crtc_id;
+			else {
+				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+					args.v1.ucCRTC = radeon_crtc->crtc_id;
+				} else {
+					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+				}
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+			case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+				break;
+			}
+			break;
+		case 2:
+			args.v2.ucCRTC = radeon_crtc->crtc_id;
+			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+			} else {
+				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = radeon_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				case 6:
+					args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* update scratch regs with new routing */
+	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+void
+atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	union crtc_source_param args;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	if (frev != 1 || crev != 2)
+		DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
+
+	args.v2.ucCRTC = radeon_crtc->crtc_id;
+	args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
+
+	switch (fe) {
+	case 0:
+		args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+		break;
+	case 1:
+		args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+		break;
+	case 2:
+		args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+		break;
+	case 3:
+		args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+		break;
+	case 4:
+		args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+		break;
+	case 5:
+		args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+		break;
+	case 6:
+		args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+		break;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	/* Funky macbooks */
+	if ((dev->pdev->device == 0x71C5) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x0080)) {
+		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+		}
+	}
+
+	/* setting the scaler clears this register on some chips */
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE8(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       CIK_INTERLEAVE_EN);
+			else
+				WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
+	}
+}
+
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx)
+{
+	if (enc_idx < 0)
+		return;
+	rdev->mode_info.active_encoders &= ~(1 << enc_idx);
+}
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *test_encoder;
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t dig_enc_in_use = 0;
+	int enc_idx = -1;
+
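+	/* a specific front end was requested (e.g. by the MST code); honor it */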
+	if (fe_idx >= 0) {
+		enc_idx = fe_idx;
+		goto assigned;
+	}
+	if (ASIC_IS_DCE6(rdev)) {
+		/* DCE6 */
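+		/* front ends are hardwired per PHY: link A gets the even
+		 * index, link B the odd one
+		 */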
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			if (dig->linkb)
+				enc_idx = 1;
+			else
+				enc_idx = 0;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			if (dig->linkb)
+				enc_idx = 3;
+			else
+				enc_idx = 2;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->linkb)
+				enc_idx = 5;
+			else
+				enc_idx = 4;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			enc_idx = 6;
+			break;
+		}
+		goto assigned;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* DCE4/5 */
+		if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
+			/* ontario follows DCE4 */
+			if (rdev->family == CHIP_PALM) {
+				if (dig->linkb)
+					enc_idx = 1;
+				else
+					enc_idx = 0;
+			} else
+				/* llano follows DCE3.2 */
+				enc_idx = radeon_crtc->crtc_id;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					enc_idx = 1;
+				else
+					enc_idx = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					enc_idx = 3;
+				else
+					enc_idx = 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					enc_idx = 5;
+				else
+					enc_idx = 4;
+				break;
+			}
+		}
+		goto assigned;
+	}
+
+	/*
+	 * On DCE32 any encoder can drive any block, so usually the crtc id is
+	 * used; Apple thinks different, at least on the iMac10,1, so use linkb
+	 * there, otherwise the internal eDP panel will stay dark.
+	 */
+	if (ASIC_IS_DCE32(rdev)) {
+		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+			enc_idx = (dig->linkb) ? 1 : 0;
+		else
+			enc_idx = radeon_crtc->crtc_id;
+
+		goto assigned;
+	}
+
+	/* on DCE3 - LVTMA can only be driven by DIGB */
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_test_encoder;
+
+		if (encoder == test_encoder)
+			continue;
+
+		if (!radeon_encoder_is_digital(test_encoder))
+			continue;
+
+		radeon_test_encoder = to_radeon_encoder(test_encoder);
+		dig = radeon_test_encoder->enc_priv;
+
+		if (dig->dig_encoder >= 0)
+			dig_enc_in_use |= (1 << dig->dig_encoder);
+	}
+
+	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+		if (dig_enc_in_use & 0x2)
+			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+		return 1;
+	}
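+	/* otherwise prefer DIG1 if it is free, else fall back to DIG2 */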
+	if (!(dig_enc_in_use & 1))
+		return 0;
+	return 1;
+
+assigned:
+	if (enc_idx == -1) {
+		DRM_ERROR("Got encoder index incorrect - returning 0\n");
+		return 0;
+	}
+	if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+		DRM_ERROR("chosen encoder in use %d\n", enc_idx);
+	}
+	rdev->mode_info.active_encoders |= (1 << enc_idx);
+	return enc_idx;
+}
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+			break;
+		default:
+			break;
+		}
+
+		if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+	}
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int encoder_mode;
+
+	radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+	/* need to call this here rather than in prepare() since we need some crtc info */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+			atombios_yuv_setup(encoder, true);
+		else
+			atombios_yuv_setup(encoder, false);
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_ENABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				atombios_tv_setup(encoder, ATOM_ENABLE);
+			else
+				atombios_tv_setup(encoder, ATOM_DISABLE);
+		}
+		break;
+	}
+
+	atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+	encoder_mode = atombios_get_encoder_mode(encoder);
+	if (connector && (radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     ENCODER_MODE_IS_DP(encoder_mode)))
+		radeon_audio_mode_set(encoder, adjusted_mode);
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+				       ATOM_DEVICE_CV_SUPPORT |
+				       ATOM_DEVICE_CRT_SUPPORT)) {
+		DAC_LOAD_DETECTION_PS_ALLOCATION args;
+		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+		uint8_t frev, crev;
+
+		memset(&args, 0, sizeof(args));
+
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+			return false;
+
+		args.sDacload.ucMisc = 0;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+			args.sDacload.ucDacType = ATOM_DAC_A;
+		else
+			args.sDacload.ucDacType = ATOM_DAC_B;
+
+		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		}
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		return true;
+	}
+
+	return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	uint32_t bios_0_scratch;
+
+	if (!atombios_dac_load_detect(encoder, connector)) {
+		DRM_DEBUG_KMS("detect returned false \n");
+		return connector_status_unknown;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+	else
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	u32 bios_0_scratch;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return connector_status_unknown;
+
+	if (!ext_encoder)
+		return connector_status_unknown;
+
+	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+		return connector_status_unknown;
+
+	/* load detect on the dp bridge */
+	atombios_external_encoder_setup(encoder, ext_encoder,
+					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	if (ext_encoder)
+		/* ddc_setup on the dp bridge */
+		atombios_external_encoder_setup(encoder, ext_encoder,
+						EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if ((radeon_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig) {
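+			/* drop any previous assignment and pick a free DIG front end */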
+			if (dig->dig_encoder >= 0)
+				radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1);
+			if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+				if (rdev->family >= CHIP_R600)
+					dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+				else
+					/* RS600/690/740 have only 1 afmt block */
+					dig->afmt = rdev->mode_info.afmt[0];
+			}
+		}
+	}
+
+	radeon_atom_output_lock(encoder, true);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	atombios_set_encoder_crtc_source(encoder);
+	/* set up the FMT blocks */
+	if (ASIC_IS_DCE8(rdev))
+		dce8_program_fmt(encoder);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_program_fmt(encoder);
+	else if (ASIC_IS_DCE3(rdev))
+		dce3_program_fmt(encoder);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_program_fmt(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+	/* need to call this here as we need the crtc set up */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	radeon_atom_output_lock(encoder, false);
+}
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+			atombios_tv_setup(encoder, ATOM_DISABLE);
+		break;
+	}
+
+disable_done:
+	if (radeon_encoder_is_digital(encoder)) {
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+			if (rdev->asic->display.hdmi_enable)
+				radeon_hdmi_enable(rdev, encoder, false);
+		}
+		if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) {
+			dig = radeon_encoder->enc_priv;
+			radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+			dig->dig_encoder = -1;
+			radeon_encoder->active_device = 0;
+		}
+	} else {
+		radeon_encoder->active_device = 0;
+	}
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+	.dpms = radeon_atom_ext_dpms,
+	.prepare = radeon_atom_ext_prepare,
+	.mode_set = radeon_atom_ext_mode_set,
+	.commit = radeon_atom_ext_commit,
+	.disable = radeon_atom_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.disable = radeon_atom_encoder_disable,
+	.detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_atom_backlight_exit(radeon_encoder);
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+	if (!dac)
+		return NULL;
+
+	dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	return dac;
+}
+
+static struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+	if (!dig)
+		return NULL;
+
+	/* coherent mode by default */
+	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
+
+	dig->linkb = (encoder_enum == 2);
+
+	return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+	}
+
+	/* add a new one */
+	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	switch (rdev->num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+	radeon_encoder->underscan_type = UNDERSCAN_OFF;
+	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+				 DRM_MODE_ENCODER_TVDAC, NULL);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		radeon_encoder->is_ext_encoder = true;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+		else
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
new file mode 100644
index 0000000..4157780
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are a limitation of ProcessI2cChannelTransaction, not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 3
+#define ATOM_MAX_HW_I2C_READ  255
+
+static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+				 u8 slave_addr, u8 flags,
+				 u8 *buf, u8 num)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+	unsigned char *base;
+	u16 out = 0;
+	int r = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	mutex_lock(&chan->mutex);
+	mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
+
+	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+	if (flags & HW_I2C_WRITE) {
+		if (num > ATOM_MAX_HW_I2C_WRITE) {
+			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
+			r = -EINVAL;
+			goto done;
+		}
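+		/* buf[0] is the register index; the remaining bytes (at most
+		 * two) are the write payload
+		 */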
+		if (buf == NULL)
+			args.ucRegIndex = 0;
+		else
+			args.ucRegIndex = buf[0];
+		if (num)
+			num--;
+		if (num)
+			memcpy(&out, &buf[1], num);
+		args.lpI2CDataOut = cpu_to_le16(out);
+	} else {
+		if (num > ATOM_MAX_HW_I2C_READ) {
+			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+			r = -EINVAL;
+			goto done;
+		}
+		args.ucRegIndex = 0;
+		args.lpI2CDataOut = 0;
+	}
+
+	args.ucFlag = flags;
+	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+	args.ucTransBytes = num;
+	args.ucSlaveAddr = slave_addr << 1;
+	args.ucLineNumber = chan->rec.i2c_id;
+
+	atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* error */
+	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+		DRM_DEBUG_KMS("hw_i2c error\n");
+		r = -EIO;
+		goto done;
+	}
+
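+	/* read results come back through the atom scratch buffer */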
+	if (!(flags & HW_I2C_WRITE))
+		radeon_atom_copy_swap(buf, base, num, false);
+
+done:
+	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
+	mutex_unlock(&chan->mutex);
+
+	return r;
+}
+
+int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct i2c_msg *p;
+	int i, remaining, current_count, buffer_offset, max_bytes, ret;
+	u8 flags;
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		ret = radeon_process_i2c_ch(i2c,
+					    p->addr, HW_I2C_WRITE,
+					    NULL, 0);
+		if (ret)
+			return ret;
+		return num;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		/* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
+		if (p->flags & I2C_M_RD) {
+			max_bytes = ATOM_MAX_HW_I2C_READ;
+			flags = HW_I2C_READ;
+		} else {
+			max_bytes = ATOM_MAX_HW_I2C_WRITE;
+			flags = HW_I2C_WRITE;
+		}
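+		/* split the message into chunks the atom table can handle */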
+		while (remaining) {
+			current_count = min(remaining, max_bytes);
+			ret = radeon_process_i2c_ch(i2c,
+						    p->addr, flags,
+						    &p->buf[buffer_offset], current_count);
+			if (ret)
+				return ret;
+			remaining -= current_count;
+			buffer_offset += current_count;
+		}
+	}
+
+	return num;
+}
+
+u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
new file mode 100644
index 0000000..3c391e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef AVIVOD_H
+#define AVIVOD_H
+
+#define	D1CRTC_CONTROL					0x6080
+#define		CRTC_EN						(1 << 0)
+#define	D1CRTC_STATUS					0x609c
+#define	D1CRTC_UPDATE_LOCK				0x60E8
+#define	D1GRPH_PRIMARY_SURFACE_ADDRESS			0x6110
+#define	D1GRPH_SECONDARY_SURFACE_ADDRESS		0x6118
+
+#define	D2CRTC_CONTROL					0x6880
+#define	D2CRTC_STATUS					0x689c
+#define	D2CRTC_UPDATE_LOCK				0x68E8
+#define	D2GRPH_PRIMARY_SURFACE_ADDRESS			0x6910
+#define	D2GRPH_SECONDARY_SURFACE_ADDRESS		0x6918
+
+#define	D1VGA_CONTROL					0x0330
+#define		DVGA_CONTROL_MODE_ENABLE			(1 << 0)
+#define		DVGA_CONTROL_TIMING_SELECT			(1 << 8)
+#define		DVGA_CONTROL_SYNC_POLARITY_SELECT		(1 << 9)
+#define		DVGA_CONTROL_OVERSCAN_TIMING_SELECT		(1 << 10)
+#define		DVGA_CONTROL_OVERSCAN_COLOR_EN			(1 << 16)
+#define		DVGA_CONTROL_ROTATE				(1 << 24)
+#define D2VGA_CONTROL					0x0338
+
+#define	VGA_HDP_CONTROL					0x328
+#define		VGA_MEM_PAGE_SELECT_EN				(1 << 0)
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+#define		VGA_RBBM_LOCK_DISABLE				(1 << 8)
+#define		VGA_SOFT_RESET					(1 << 16)
+#define	VGA_MEMORY_BASE_ADDRESS				0x0310
+#define	VGA_RENDER_CONTROL				0x0300
+#define		VGA_VSTATUS_CNTL_MASK				0x00030000
+
+#endif
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
new file mode 100644
index 0000000..0aef493
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -0,0 +1,2826 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "btcd.h"
+#include "r600_dpm.h"
+#include "cypress_dpm.h"
+#include "btc_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+#define SMC_RAM_END 0x8000
+
+#ifndef BTC_MGCG_SEQUENCE
+#define BTC_MGCG_SEQUENCE  300
+
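+/* forward declarations for helpers defined elsewhere in the driver */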
+struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
+/********** BARTS **********/
+static const u32 barts_cgcg_cgls_default[] =
+{
+	/* Register,   Value,     Mask bits */
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define BARTS_CGCG_CGLS_DEFAULT_LENGTH (sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32)))
+
+static const u32 barts_cgcg_cgls_disable[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00000644, 0x000f7912, 0x001f4180,
+	0x00000644, 0x000f3812, 0x001f4180
+};
+#define BARTS_CGCG_CGLS_DISABLE_LENGTH (sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32)))
+
+static const u32 barts_cgcg_cgls_enable[] =
+{
+	/* 0x0000c124, 0x84180000, 0x00180000, */
+	0x00000644, 0x000f7892, 0x001f4080,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff
+};
+#define BARTS_CGCG_CGLS_ENABLE_LENGTH (sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32)))
+
+static const u32 barts_mgcg_default[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00005448, 0x00000100, 0xffffffff,
+	0x000055e4, 0x00600100, 0xffffffff,
+	0x0000160c, 0x00000100, 0xffffffff,
+	0x0000c164, 0x00000100, 0xffffffff,
+	0x00008a18, 0x00000100, 0xffffffff,
+	0x0000897c, 0x06000100, 0xffffffff,
+	0x00008b28, 0x00000100, 0xffffffff,
+	0x00009144, 0x00000100, 0xffffffff,
+	0x00009a60, 0x00000100, 0xffffffff,
+	0x00009868, 0x00000100, 0xffffffff,
+	0x00008d58, 0x00000100, 0xffffffff,
+	0x00009510, 0x00000100, 0xffffffff,
+	0x0000949c, 0x00000100, 0xffffffff,
+	0x00009654, 0x00000100, 0xffffffff,
+	0x00009030, 0x00000100, 0xffffffff,
+	0x00009034, 0x00000100, 0xffffffff,
+	0x00009038, 0x00000100, 0xffffffff,
+	0x0000903c, 0x00000100, 0xffffffff,
+	0x00009040, 0x00000100, 0xffffffff,
+	0x0000a200, 0x00000100, 0xffffffff,
+	0x0000a204, 0x00000100, 0xffffffff,
+	0x0000a208, 0x00000100, 0xffffffff,
+	0x0000a20c, 0x00000100, 0xffffffff,
+	0x0000977c, 0x00000100, 0xffffffff,
+	0x00003f80, 0x00000100, 0xffffffff,
+	0x0000a210, 0x00000100, 0xffffffff,
+	0x0000a214, 0x00000100, 0xffffffff,
+	0x000004d8, 0x00000100, 0xffffffff,
+	0x00009784, 0x00000100, 0xffffffff,
+	0x00009698, 0x00000100, 0xffffffff,
+	0x000004d4, 0x00000200, 0xffffffff,
+	0x000004d0, 0x00000000, 0xffffffff,
+	0x000030cc, 0x00000100, 0xffffffff,
+	0x0000d0c0, 0xff000100, 0xffffffff,
+	0x0000802c, 0x40000000, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009224, 0x00070000, 0xffffffff,
+	0x00009228, 0x00030002, 0xffffffff,
+	0x0000922c, 0x00050004, 0xffffffff,
+	0x00009238, 0x00010006, 0xffffffff,
+	0x0000923c, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x0000802c, 0x40010000, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009224, 0x00070000, 0xffffffff,
+	0x00009228, 0x00030002, 0xffffffff,
+	0x0000922c, 0x00050004, 0xffffffff,
+	0x00009238, 0x00010006, 0xffffffff,
+	0x0000923c, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define BARTS_MGCG_DEFAULT_LENGTH (sizeof(barts_mgcg_default) / (3 * sizeof(u32)))
+
+static const u32 barts_mgcg_disable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x00009150, 0x00600000, 0xffffffff
+};
+#define BARTS_MGCG_DISABLE_LENGTH (sizeof(barts_mgcg_disable) / (3 * sizeof(u32)))
+
+static const u32 barts_mgcg_enable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00009150, 0x81944000, 0xffffffff
+};
+#define BARTS_MGCG_ENABLE_LENGTH (sizeof(barts_mgcg_enable) / (3 * sizeof(u32)))
+
+/********** CAICOS **********/
+static const u32 caicos_cgcg_cgls_default[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define CAICOS_CGCG_CGLS_DEFAULT_LENGTH (sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32)))
+
+static const u32 caicos_cgcg_cgls_disable[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00000644, 0x000f7912, 0x001f4180,
+	0x00000644, 0x000f3812, 0x001f4180
+};
+#define CAICOS_CGCG_CGLS_DISABLE_LENGTH (sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32)))
+
+static const u32 caicos_cgcg_cgls_enable[] =
+{
+	/* 0x0000c124, 0x84180000, 0x00180000, */
+	0x00000644, 0x000f7892, 0x001f4080,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff
+};
+#define CAICOS_CGCG_CGLS_ENABLE_LENGTH (sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32)))
+
+static const u32 caicos_mgcg_default[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00005448, 0x00000100, 0xffffffff,
+	0x000055e4, 0x00600100, 0xffffffff,
+	0x0000160c, 0x00000100, 0xffffffff,
+	0x0000c164, 0x00000100, 0xffffffff,
+	0x00008a18, 0x00000100, 0xffffffff,
+	0x0000897c, 0x06000100, 0xffffffff,
+	0x00008b28, 0x00000100, 0xffffffff,
+	0x00009144, 0x00000100, 0xffffffff,
+	0x00009a60, 0x00000100, 0xffffffff,
+	0x00009868, 0x00000100, 0xffffffff,
+	0x00008d58, 0x00000100, 0xffffffff,
+	0x00009510, 0x00000100, 0xffffffff,
+	0x0000949c, 0x00000100, 0xffffffff,
+	0x00009654, 0x00000100, 0xffffffff,
+	0x00009030, 0x00000100, 0xffffffff,
+	0x00009034, 0x00000100, 0xffffffff,
+	0x00009038, 0x00000100, 0xffffffff,
+	0x0000903c, 0x00000100, 0xffffffff,
+	0x00009040, 0x00000100, 0xffffffff,
+	0x0000a200, 0x00000100, 0xffffffff,
+	0x0000a204, 0x00000100, 0xffffffff,
+	0x0000a208, 0x00000100, 0xffffffff,
+	0x0000a20c, 0x00000100, 0xffffffff,
+	0x0000977c, 0x00000100, 0xffffffff,
+	0x00003f80, 0x00000100, 0xffffffff,
+	0x0000a210, 0x00000100, 0xffffffff,
+	0x0000a214, 0x00000100, 0xffffffff,
+	0x000004d8, 0x00000100, 0xffffffff,
+	0x00009784, 0x00000100, 0xffffffff,
+	0x00009698, 0x00000100, 0xffffffff,
+	0x000004d4, 0x00000200, 0xffffffff,
+	0x000004d0, 0x00000000, 0xffffffff,
+	0x000030cc, 0x00000100, 0xffffffff,
+	0x0000d0c0, 0xff000100, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define CAICOS_MGCG_DEFAULT_LENGTH (sizeof(caicos_mgcg_default) / (3 * sizeof(u32)))
+
+static const u32 caicos_mgcg_disable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x00009150, 0x00600000, 0xffffffff
+};
+#define CAICOS_MGCG_DISABLE_LENGTH (sizeof(caicos_mgcg_disable) / (3 * sizeof(u32)))
+
+static const u32 caicos_mgcg_enable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00009150, 0x46944040, 0xffffffff
+};
+#define CAICOS_MGCG_ENABLE_LENGTH (sizeof(caicos_mgcg_enable) / (3 * sizeof(u32)))
+
+/********** TURKS **********/
+static const u32 turks_cgcg_cgls_default[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define TURKS_CGCG_CGLS_DEFAULT_LENGTH  sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
+
+static const u32 turks_cgcg_cgls_disable[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00000644, 0x000f7912, 0x001f4180,
+	0x00000644, 0x000f3812, 0x001f4180
+};
+#define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
+
+static const u32 turks_cgcg_cgls_enable[] =
+{
+	/* 0x0000c124, 0x84180000, 0x00180000, */
+	0x00000644, 0x000f7892, 0x001f4080,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff
+};
+#define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
+
+// MGCG (medium grain clock gating) register sequences for TURKS
+static const u32 turks_mgcg_default[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00005448, 0x00000100, 0xffffffff,
+	0x000055e4, 0x00600100, 0xffffffff,
+	0x0000160c, 0x00000100, 0xffffffff,
+	0x0000c164, 0x00000100, 0xffffffff,
+	0x00008a18, 0x00000100, 0xffffffff,
+	0x0000897c, 0x06000100, 0xffffffff,
+	0x00008b28, 0x00000100, 0xffffffff,
+	0x00009144, 0x00000100, 0xffffffff,
+	0x00009a60, 0x00000100, 0xffffffff,
+	0x00009868, 0x00000100, 0xffffffff,
+	0x00008d58, 0x00000100, 0xffffffff,
+	0x00009510, 0x00000100, 0xffffffff,
+	0x0000949c, 0x00000100, 0xffffffff,
+	0x00009654, 0x00000100, 0xffffffff,
+	0x00009030, 0x00000100, 0xffffffff,
+	0x00009034, 0x00000100, 0xffffffff,
+	0x00009038, 0x00000100, 0xffffffff,
+	0x0000903c, 0x00000100, 0xffffffff,
+	0x00009040, 0x00000100, 0xffffffff,
+	0x0000a200, 0x00000100, 0xffffffff,
+	0x0000a204, 0x00000100, 0xffffffff,
+	0x0000a208, 0x00000100, 0xffffffff,
+	0x0000a20c, 0x00000100, 0xffffffff,
+	0x0000977c, 0x00000100, 0xffffffff,
+	0x00003f80, 0x00000100, 0xffffffff,
+	0x0000a210, 0x00000100, 0xffffffff,
+	0x0000a214, 0x00000100, 0xffffffff,
+	0x000004d8, 0x00000100, 0xffffffff,
+	0x00009784, 0x00000100, 0xffffffff,
+	0x00009698, 0x00000100, 0xffffffff,
+	0x000004d4, 0x00000200, 0xffffffff,
+	0x000004d0, 0x00000000, 0xffffffff,
+	0x000030cc, 0x00000100, 0xffffffff,
+	0x0000d0c0, 0x00000100, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
+
+static const u32 turks_mgcg_disable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x00009150, 0x00600000, 0xffffffff
+};
+#define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
+
+static const u32 turks_mgcg_enable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00009150, 0x6e944000, 0xffffffff
+};
+#define TURKS_MGCG_ENABLE_LENGTH sizeof(turks_mgcg_enable) / (3 * sizeof(u32))
+
+#endif
+
+#ifndef BTC_SYSLS_SEQUENCE
+#define BTC_SYSLS_SEQUENCE  100
+
+//********* BARTS **************//
+static const u32 barts_sysls_default[] =
+{
+	/* Register,   Value,     Mask bits */
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
+
+static const u32 barts_sysls_disable[] =
+{
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x00041401, 0xffffffff,
+	0x0000264c, 0x00040400, 0xffffffff,
+	0x00002648, 0x00040400, 0xffffffff,
+	0x00002650, 0x00040400, 0xffffffff,
+	0x000020b8, 0x00040400, 0xffffffff,
+	0x000020bc, 0x00040400, 0xffffffff,
+	0x000020c0, 0x00040c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680000, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00007ffd, 0xffffffff,
+	0x00000c7c, 0x0000ff00, 0xffffffff,
+	0x00006dfc, 0x0000007f, 0xffffffff
+};
+#define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
+
+static const u32 barts_sysls_enable[] =
+{
+	0x000055e8, 0x00000001, 0xffffffff,
+	0x0000d0bc, 0x00000100, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000004c8, 0x00000000, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
+
+//********* CAICOS **************//
+static const u32 caicos_sysls_default[] =
+{
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
+
+static const u32 caicos_sysls_disable[] =
+{
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x00041401, 0xffffffff,
+	0x0000264c, 0x00040400, 0xffffffff,
+	0x00002648, 0x00040400, 0xffffffff,
+	0x00002650, 0x00040400, 0xffffffff,
+	0x000020b8, 0x00040400, 0xffffffff,
+	0x000020bc, 0x00040400, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680000, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00007ffd, 0xffffffff,
+	0x00000c7c, 0x0000ff00, 0xffffffff,
+	0x00006dfc, 0x0000007f, 0xffffffff
+};
+#define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
+
+static const u32 caicos_sysls_enable[] =
+{
+	0x000055e8, 0x00000001, 0xffffffff,
+	0x0000d0bc, 0x00000100, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff,
+	0x000004c8, 0x00000000, 0xffffffff
+};
+#define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
+
+//********* TURKS **************//
+static const u32 turks_sysls_default[] =
+{
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
+
+static const u32 turks_sysls_disable[] =
+{
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x00041401, 0xffffffff,
+	0x0000264c, 0x00040400, 0xffffffff,
+	0x00002648, 0x00040400, 0xffffffff,
+	0x00002650, 0x00040400, 0xffffffff,
+	0x000020b8, 0x00040400, 0xffffffff,
+	0x000020bc, 0x00040400, 0xffffffff,
+	0x000020c0, 0x00040c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680000, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00007ffd, 0xffffffff,
+	0x00000c7c, 0x0000ff00, 0xffffffff,
+	0x00006dfc, 0x0000007f, 0xffffffff
+};
+#define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
+
+static const u32 turks_sysls_enable[] =
+{
+	0x000055e8, 0x00000001, 0xffffffff,
+	0x0000d0bc, 0x00000100, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x000004c8, 0x00000000, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#define TURKS_SYSLS_ENABLE_LENGTH sizeof(turks_sysls_enable) / (3 * sizeof(u32))
+
+#endif
+
+u32 btc_valid_sclk[40] =
+{
+	5000,   10000,  15000,  20000,  25000,  30000,  35000,  40000,  45000,  50000,
+	55000,  60000,  65000,  70000,  75000,  80000,  85000,  90000,  95000,  100000,
+	105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
+	155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
+};
+
+static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = {
+	{ 10000, 30000, RADEON_SCLK_UP },
+	{ 15000, 30000, RADEON_SCLK_UP },
+	{ 20000, 30000, RADEON_SCLK_UP },
+	{ 25000, 30000, RADEON_SCLK_UP }
+};
+
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock)
+{
+	u32 i, clock = 0;
+
+	if ((table == NULL) || (table->count == 0)) {
+		*max_clock = clock;
+		return;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if (clock < table->entries[i].clk)
+			clock = table->entries[i].clk;
+	}
+	*max_clock = clock;
+}
+
+void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
+					u32 clock, u16 max_voltage, u16 *voltage)
+{
+	u32 i;
+
+	if ((table == NULL) || (table->count == 0))
+		return;
+
+	for (i = 0; i < table->count; i++) {
+		if (clock <= table->entries[i].clk) {
+			if (*voltage < table->entries[i].v)
+				*voltage = (u16)((table->entries[i].v < max_voltage) ?
+						  table->entries[i].v : max_voltage);
+			return;
+		}
+	}
+
+	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
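+
+/*
+ * Example: with dependency entries {20000 -> 900, 40000 -> 1000}, a
+ * request of clock=30000 matches the 40000 entry, so *voltage is raised
+ * to min(1000, max_voltage) if it was lower; a clock beyond the whole
+ * table falls back to max_voltage as the floor.
+ */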
+
+static u32 btc_find_valid_clock(struct radeon_clock_array *clocks,
+				u32 max_clock, u32 requested_clock)
+{
+	unsigned int i;
+
+	if ((clocks == NULL) || (clocks->count == 0))
+		return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+	for (i = 0; i < clocks->count; i++) {
+		if (clocks->values[i] >= requested_clock)
+			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+	}
+
+	return (clocks->values[clocks->count - 1] < max_clock) ?
+		clocks->values[clocks->count - 1] : max_clock;
+}
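+
+/*
+ * Example (using the btc_valid_sclk table above as the valid list):
+ * btc_find_valid_clock(clocks, 200000, 12000) returns 15000, the first
+ * entry >= the request; a request above every entry falls through to
+ * the last entry, and max_clock caps the result in all cases.
+ */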
+
+static u32 btc_get_valid_mclk(struct radeon_device *rdev,
+			      u32 max_mclk, u32 requested_mclk)
+{
+	return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_mclk_values,
+				    max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct radeon_device *rdev,
+			      u32 max_sclk, u32 requested_sclk)
+{
+	return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_sclk_values,
+				    max_sclk, requested_sclk);
+}
+
+void btc_skip_blacklist_clocks(struct radeon_device *rdev,
+			       const u32 max_sclk, const u32 max_mclk,
+			       u32 *sclk, u32 *mclk)
+{
+	int i, num_blacklist_clocks;
+
+	if ((sclk == NULL) || (mclk == NULL))
+		return;
+
+	num_blacklist_clocks = ARRAY_SIZE(btc_blacklist_clocks);
+
+	for (i = 0; i < num_blacklist_clocks; i++) {
+		if ((btc_blacklist_clocks[i].sclk == *sclk) &&
+		    (btc_blacklist_clocks[i].mclk == *mclk))
+			break;
+	}
+
+	if (i < num_blacklist_clocks) {
+		if (btc_blacklist_clocks[i].action == RADEON_SCLK_UP) {
+			*sclk = btc_get_valid_sclk(rdev, max_sclk, *sclk + 1);
+
+			if (*sclk < max_sclk)
+				btc_skip_blacklist_clocks(rdev, max_sclk, max_mclk, sclk, mclk);
+		}
+	}
+}
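+
+/*
+ * Example: per btc_blacklist_clocks above, sclk=10000/mclk=30000 is a
+ * blacklisted pair with action RADEON_SCLK_UP, so *sclk is bumped to
+ * the next valid sclk (15000) and rechecked; 15000, 20000 and 25000
+ * are also blacklisted against mclk=30000, so the recursion stops
+ * only at sclk=30000 (assuming max_sclk allows it).
+ */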
+
+void btc_adjust_clock_combinations(struct radeon_device *rdev,
+				   const struct radeon_clock_and_voltage_limits *max_limits,
+				   struct rv7xx_pl *pl)
+{
+	if ((pl->mclk == 0) || (pl->sclk == 0))
+		return;
+
+	if (pl->mclk == pl->sclk)
+		return;
+
+	if (pl->mclk > pl->sclk) {
+		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > rdev->pm.dpm.dyn_state.mclk_sclk_ratio)
+			pl->sclk = btc_get_valid_sclk(rdev,
+						      max_limits->sclk,
+						      (pl->mclk +
+						       (rdev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+						      rdev->pm.dpm.dyn_state.mclk_sclk_ratio);
+	} else {
+		if ((pl->sclk - pl->mclk) > rdev->pm.dpm.dyn_state.sclk_mclk_delta)
+			pl->mclk = btc_get_valid_mclk(rdev,
+						      max_limits->mclk,
+						      pl->sclk -
+						      rdev->pm.dpm.dyn_state.sclk_mclk_delta);
+	}
+}
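+
+/*
+ * Example, assuming dyn_state.mclk_sclk_ratio == 4 (the value is set up
+ * elsewhere): mclk=100000 with sclk=20000 is a 5:1 ratio, so sclk is
+ * raised to the nearest valid sclk >= DIV_ROUND_UP(100000, 4) = 25000.
+ * In the opposite case, an sclk more than sclk_mclk_delta above mclk
+ * pulls mclk up to sclk - sclk_mclk_delta instead.
+ */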
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (voltage <= table->entries[i].value)
+			return table->entries[i].value;
+	}
+
+	return table->entries[table->count - 1].value;
+}
+
+void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
+				   u16 max_vddc, u16 max_vddci,
+				   u16 *vddc, u16 *vddci)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u16 new_voltage;
+
+	if ((*vddc == 0) || (*vddci == 0))
+		return;
+
+	if (*vddc > *vddci) {
+		if ((*vddc - *vddci) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+						       (*vddc - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+		}
+	} else {
+		if ((*vddci - *vddc) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+						       (*vddci - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+		}
+	}
+}
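+
+/*
+ * Example, assuming dyn_state.vddc_vddci_delta == 200: vddc=1100 with
+ * vddci=825 exceeds the allowed spread, so vddci is raised to the
+ * smallest vddci_voltage_table entry >= 1100 - 200 = 900, capped at
+ * max_vddci; the symmetric case raises vddc instead.
+ */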
+
+static void btc_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
+					     bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp, bif;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (enable) {
+		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+			if (!pi->boot_in_gen2) {
+				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
+				bif |= CG_CLIENT_REQ(0xd);
+				WREG32(CG_BIF_REQ_AND_RSP, bif);
+
+				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
+				tmp |= LC_GEN2_EN_STRAP;
+
+				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+				udelay(10);
+				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+			}
+		}
+	} else {
+		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+			if (!pi->boot_in_gen2) {
+				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
+				bif |= CG_CLIENT_REQ(0xd);
+				WREG32(CG_BIF_REQ_AND_RSP, bif);
+
+				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+				tmp &= ~LC_GEN2_EN_STRAP;
+			}
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+		}
+	}
+}
+
+static void btc_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
+					 bool enable)
+{
+	btc_enable_bif_dynamic_pcie_gen2(rdev, enable);
+
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+}
+
+static int btc_disable_ulv(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (eg_pi->ulv.supported) {
+		if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int btc_populate_ulv_state(struct radeon_device *rdev,
+				  RV770_SMC_STATETABLE *table)
+{
+	int ret = -EINVAL;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
+
+	if (ulv_pl->vddc) {
+		ret = cypress_convert_power_level_to_smc(rdev,
+							 ulv_pl,
+							 &table->ULVState.levels[0],
+							 PPSMC_DISPLAY_WATERMARK_LOW);
+		if (ret == 0) {
+			table->ULVState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
+			table->ULVState.levels[0].ACIndex = 1;
+
+			table->ULVState.levels[1] = table->ULVState.levels[0];
+			table->ULVState.levels[2] = table->ULVState.levels[0];
+
+			table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+			WREG32(CG_ULV_CONTROL, BTC_CGULVCONTROL_DFLT);
+			WREG32(CG_ULV_PARAMETER, BTC_CGULVPARAMETER_DFLT);
+		}
+	}
+
+	return ret;
+}
+
+static int btc_populate_smc_acpi_state(struct radeon_device *rdev,
+				       RV770_SMC_STATETABLE *table)
+{
+	int ret = cypress_populate_smc_acpi_state(rdev, table);
+
+	if (ret == 0) {
+		table->ACPIState.levels[0].ACIndex = 0;
+		table->ACPIState.levels[1].ACIndex = 0;
+		table->ACPIState.levels[2].ACIndex = 0;
+	}
+
+	return ret;
+}
+
+void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
+				  const u32 *sequence, u32 count)
+{
+	u32 i, length = count * 3;
+	u32 tmp;
+
+	for (i = 0; i < length; i += 3) {
+		tmp = RREG32(sequence[i]);
+		tmp &= ~sequence[i+2];
+		tmp |= sequence[i+1] & sequence[i+2];
+		WREG32(sequence[i], tmp);
+	}
+}
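+
+/*
+ * Worked example: applying the caicos_mgcg_enable triple
+ * { 0x00009150, 0x46944040, 0xffffffff } above expands to
+ *
+ *	tmp = RREG32(0x00009150);
+ *	tmp &= ~0xffffffff;
+ *	tmp |= 0x46944040 & 0xffffffff;
+ *	WREG32(0x00009150, tmp);
+ *
+ * i.e. an all-ones mask is a full-register write, while a narrower
+ * mask would preserve the untouched bits.
+ */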
+
+static void btc_cg_clock_gating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (rdev->family == CHIP_BARTS) {
+		p = (const u32 *)&barts_cgcg_cgls_default;
+		count = BARTS_CGCG_CGLS_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_TURKS) {
+		p = (const u32 *)&turks_cgcg_cgls_default;
+		count = TURKS_CGCG_CGLS_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_CAICOS) {
+		p = (const u32 *)&caicos_cgcg_cgls_default;
+		count = CAICOS_CGCG_CGLS_DEFAULT_LENGTH;
+	} else
+		return;
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+static void btc_cg_clock_gating_enable(struct radeon_device *rdev,
+				       bool enable)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (enable) {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_cgcg_cgls_enable;
+			count = BARTS_CGCG_CGLS_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_cgcg_cgls_enable;
+			count = TURKS_CGCG_CGLS_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_cgcg_cgls_enable;
+			count = CAICOS_CGCG_CGLS_ENABLE_LENGTH;
+		} else
+			return;
+	} else {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_cgcg_cgls_disable;
+			count = BARTS_CGCG_CGLS_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_cgcg_cgls_disable;
+			count = TURKS_CGCG_CGLS_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_cgcg_cgls_disable;
+			count = CAICOS_CGCG_CGLS_DISABLE_LENGTH;
+		} else
+			return;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+static void btc_mg_clock_gating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (rdev->family == CHIP_BARTS) {
+		p = (const u32 *)&barts_mgcg_default;
+		count = BARTS_MGCG_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_TURKS) {
+		p = (const u32 *)&turks_mgcg_default;
+		count = TURKS_MGCG_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_CAICOS) {
+		p = (const u32 *)&caicos_mgcg_default;
+		count = CAICOS_MGCG_DEFAULT_LENGTH;
+	} else
+		return;
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+static void btc_mg_clock_gating_enable(struct radeon_device *rdev,
+				       bool enable)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (enable) {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_mgcg_enable;
+			count = BARTS_MGCG_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_mgcg_enable;
+			count = TURKS_MGCG_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_mgcg_enable;
+			count = CAICOS_MGCG_ENABLE_LENGTH;
+		} else
+			return;
+	} else {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_mgcg_disable[0];
+			count = BARTS_MGCG_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_mgcg_disable[0];
+			count = TURKS_MGCG_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_mgcg_disable[0];
+			count = CAICOS_MGCG_DISABLE_LENGTH;
+		} else
+			return;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+static void btc_ls_clock_gating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (rdev->family == CHIP_BARTS) {
+		p = (const u32 *)&barts_sysls_default;
+		count = BARTS_SYSLS_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_TURKS) {
+		p = (const u32 *)&turks_sysls_default;
+		count = TURKS_SYSLS_DEFAULT_LENGTH;
+	} else if (rdev->family == CHIP_CAICOS) {
+		p = (const u32 *)&caicos_sysls_default;
+		count = CAICOS_SYSLS_DEFAULT_LENGTH;
+	} else
+		return;
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+static void btc_ls_clock_gating_enable(struct radeon_device *rdev,
+				       bool enable)
+{
+	u32 count;
+	const u32 *p = NULL;
+
+	if (enable) {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_sysls_enable;
+			count = BARTS_SYSLS_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_sysls_enable;
+			count = TURKS_SYSLS_ENABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_sysls_enable;
+			count = CAICOS_SYSLS_ENABLE_LENGTH;
+		} else
+			return;
+	} else {
+		if (rdev->family == CHIP_BARTS) {
+			p = (const u32 *)&barts_sysls_disable;
+			count = BARTS_SYSLS_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_TURKS) {
+			p = (const u32 *)&turks_sysls_disable;
+			count = TURKS_SYSLS_DISABLE_LENGTH;
+		} else if (rdev->family == CHIP_CAICOS) {
+			p = (const u32 *)&caicos_sysls_disable;
+			count = CAICOS_SYSLS_DISABLE_LENGTH;
+		} else
+			return;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, p, count);
+}
+
+bool btc_dpm_enabled(struct radeon_device *rdev)
+{
+	return rv770_is_smc_running(rdev);
+}
+
+static int btc_init_smc_table(struct radeon_device *rdev,
+			      struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
+	int ret;
+
+	memset(table, 0, sizeof(RV770_SMC_STATETABLE));
+
+	cypress_populate_smc_voltage_tables(rdev, table);
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	if (ret)
+		return ret;
+
+	if (eg_pi->sclk_deep_sleep)
+		WREG32_P(SCLK_PSKIP_CNTL, PSKIP_ON_ALLOW_STOP_HI(32),
+			 ~PSKIP_ON_ALLOW_STOP_HI_MASK);
+
+	ret = btc_populate_smc_acpi_state(rdev, table);
+	if (ret)
+		return ret;
+
+	if (eg_pi->ulv.supported) {
+		ret = btc_populate_ulv_state(rdev, table);
+		if (ret)
+			eg_pi->ulv.supported = false;
+	}
+
+	table->driverState = table->initialState;
+
+	return rv770_copy_bytes_to_smc(rdev,
+				       pi->state_table_start,
+				       (u8 *)table,
+				       sizeof(RV770_SMC_STATETABLE),
+				       pi->sram_end);
+}
+
+static void btc_set_at_for_uvd(struct radeon_device *rdev,
+			       struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int idx = 0;
+
+	if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2))
+		idx = 1;
+
+	if ((idx == 1) && !eg_pi->smu_uvd_hs) {
+		pi->rlp = 10;
+		pi->rmp = 100;
+		pi->lhp = 100;
+		pi->lmp = 10;
+	} else {
+		pi->rlp = eg_pi->ats[idx].rlp;
+		pi->rmp = eg_pi->ats[idx].rmp;
+		pi->lhp = eg_pi->ats[idx].lhp;
+		pi->lmp = eg_pi->ats[idx].lmp;
+	}
+}
+
+void btc_notify_uvd_to_smc(struct radeon_device *rdev,
+			   struct radeon_ps *radeon_new_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
+		rv770_write_smc_soft_register(rdev,
+					      RV770_SMC_SOFT_REGISTER_uvd_enabled, 1);
+		eg_pi->uvd_enabled = true;
+	} else {
+		rv770_write_smc_soft_register(rdev,
+					      RV770_SMC_SOFT_REGISTER_uvd_enabled, 0);
+		eg_pi->uvd_enabled = false;
+	}
+}
+
+int btc_reset_to_default(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void btc_stop_smc(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (((RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK) >> LB_SYNC_RESET_SEL_SHIFT) != 1)
+			break;
+		udelay(1);
+	}
+	udelay(100);
+
+	r7xx_stop_smc(rdev);
+}
+
+void btc_read_arb_registers(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct evergreen_arb_registers *arb_registers =
+		&eg_pi->bootup_arb_registers;
+
+	arb_registers->mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	arb_registers->mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+	arb_registers->mc_arb_rfsh_rate = RREG32(MC_ARB_RFSH_RATE);
+	arb_registers->mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
+}
+
+static void btc_set_arb0_registers(struct radeon_device *rdev,
+				   struct evergreen_arb_registers *arb_registers)
+{
+	u32 val;
+
+	WREG32(MC_ARB_DRAM_TIMING,  arb_registers->mc_arb_dram_timing);
+	WREG32(MC_ARB_DRAM_TIMING2, arb_registers->mc_arb_dram_timing2);
+
+	val = (arb_registers->mc_arb_rfsh_rate & POWERMODE0_MASK) >>
+		POWERMODE0_SHIFT;
+	WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);
+
+	val = (arb_registers->mc_arb_burst_time & STATE0_MASK) >>
+		STATE0_SHIFT;
+	WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
+}
+
+static void btc_set_boot_state_timing(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (eg_pi->ulv.supported)
+		btc_set_arb0_registers(rdev, &eg_pi->bootup_arb_registers);
+}
+
+static bool btc_is_state_ulv_compatible(struct radeon_device *rdev,
+					struct radeon_ps *radeon_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
+
+	if (state->low.mclk != ulv_pl->mclk)
+		return false;
+
+	if (state->low.vddci != ulv_pl->vddci)
+		return false;
+
+	/* XXX check minclocks, etc. */
+
+	return true;
+}
+
+static int btc_set_ulv_dram_timing(struct radeon_device *rdev)
+{
+	u32 val;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
+
+	radeon_atom_set_engine_dram_timings(rdev,
+					    ulv_pl->sclk,
+					    ulv_pl->mclk);
+
+	val = rv770_calculate_memory_refresh_rate(rdev, ulv_pl->sclk);
+	WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);
+
+	val = cypress_calculate_burst_time(rdev, ulv_pl->sclk, ulv_pl->mclk);
+	WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
+
+	return 0;
+}
+
+static int btc_enable_ulv(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int btc_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
+							struct radeon_ps *radeon_new_state)
+{
+	int ret = 0;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (eg_pi->ulv.supported) {
+		if (btc_is_state_ulv_compatible(rdev, radeon_new_state)) {
+			/* set ARB[0] to reflect the DRAM timing needed for ULV */
+			ret = btc_set_ulv_dram_timing(rdev);
+			if (ret == 0)
+				ret = btc_enable_ulv(rdev);
+		}
+	}
+
+	return ret;
+}
+
+static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case MC_SEQ_RAS_TIMING >> 2:
+		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_CAS_TIMING >> 2:
+		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING2 >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+		break;
+	case MC_PMG_CMD_EMRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS1 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
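+
+/*
+ * Note: s1/s0 hold dword indices (byte offset >> 2).  This helper maps
+ * each MC sequencer register to its _LP (low power) shadow, e.g.
+ * MC_SEQ_RAS_TIMING -> MC_SEQ_RAS_TIMING_LP; registers without a
+ * shadow keep their own address (see btc_set_s0_mc_reg_index below).
+ */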
+
+static void btc_set_valid_flag(struct evergreen_mc_reg_table *table)
+{
+	u8 i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+			    table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= (1 << i);
+				break;
+			}
+		}
+	}
+}
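+
+/*
+ * In other words, bit i of valid_flag is set iff register i takes
+ * different values across the timing entries; registers whose value
+ * never changes need not be reprogrammed when switching mclk levels.
+ */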
+
+static int btc_set_mc_special_registers(struct radeon_device *rdev,
+					struct evergreen_mc_reg_table *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 i, j, k;
+	u32 tmp;
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		switch (table->mc_reg_address[i].s1) {
+		case MC_SEQ_MISC1 >> 2:
+			tmp = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(tmp & 0xffff0000) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+
+			if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			tmp = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(tmp & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (!pi->mem_gddr5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+
+			if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		case MC_SEQ_RESERVE_M >> 2:
+			tmp = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(tmp & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+
+			if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+	}
+
+	table->last = j;
+
+	return 0;
+}
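+
+/*
+ * Sketch of the expansion above: each MC_SEQ_MISC1 row grows two
+ * derived rows, MC_PMG_CMD_EMRS and MC_PMG_CMD_MRS, whose per-entry
+ * data keep the live register's top halfword and splice in bits from
+ * the source row -- EMRS takes the source's top halfword shifted down,
+ * MRS its bottom halfword (with bit 8 forced for non-GDDR5); likewise
+ * MC_SEQ_RESERVE_M grows an MC_PMG_CMD_MRS1 row.
+ */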
+
+static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+	}
+}
+
+static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+				       struct evergreen_mc_reg_table *eg_table)
+{
+	u8 i, j;
+
+	if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	eg_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		eg_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++)
+			eg_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+	}
+	eg_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+static int btc_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+	int ret;
+	struct atom_mc_reg_table *table;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct evergreen_mc_reg_table *eg_table = &eg_pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(rdev);
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+
+	if (ret)
+		goto init_mc_done;
+
+	ret = btc_copy_vbios_mc_reg_table(table, eg_table);
+
+	if (ret)
+		goto init_mc_done;
+
+	btc_set_s0_mc_reg_index(eg_table);
+	ret = btc_set_mc_special_registers(rdev, eg_table);
+
+	if (ret)
+		goto init_mc_done;
+
+	btc_set_valid_flag(eg_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
+static void btc_init_stutter_mode(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+	if (pi->mclk_stutter_mode_threshold) {
+		if (pi->mem_gddr5) {
+			tmp = RREG32(MC_PMG_AUTO_CFG);
+			if ((0x200 & tmp) == 0) {
+				tmp = (tmp & 0xfffffc0b) | 0x204;
+				WREG32(MC_PMG_AUTO_CFG, tmp);
+			}
+		}
+	}
+}
+
+bool btc_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+	u32 switch_limit = pi->mem_gddr5 ? 450 : 100;
+
+	return vblank_time < switch_limit;
+}
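+
+/*
+ * Rationale: an mclk switch has to complete inside the vblank window
+ * to avoid visible artifacts, and GDDR5 link retraining takes longer,
+ * hence the larger threshold when pi->mem_gddr5 is set.
+ */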
+
+static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
+					 struct radeon_ps *rps)
+{
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct radeon_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching;
+	u32 mclk, sclk;
+	u16 vddc, vddci;
+
+	disable_mclk_switching = (rdev->pm.dpm.new_active_crtc_count > 1) ||
+		btc_dpm_vblank_too_short(rdev);
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (!rdev->pm.dpm.ac_power) {
+		if (ps->high.mclk > max_limits->mclk)
+			ps->high.mclk = max_limits->mclk;
+		if (ps->high.sclk > max_limits->sclk)
+			ps->high.sclk = max_limits->sclk;
+		if (ps->high.vddc > max_limits->vddc)
+			ps->high.vddc = max_limits->vddc;
+		if (ps->high.vddci > max_limits->vddci)
+			ps->high.vddci = max_limits->vddci;
+
+		if (ps->medium.mclk > max_limits->mclk)
+			ps->medium.mclk = max_limits->mclk;
+		if (ps->medium.sclk > max_limits->sclk)
+			ps->medium.sclk = max_limits->sclk;
+		if (ps->medium.vddc > max_limits->vddc)
+			ps->medium.vddc = max_limits->vddc;
+		if (ps->medium.vddci > max_limits->vddci)
+			ps->medium.vddci = max_limits->vddci;
+
+		if (ps->low.mclk > max_limits->mclk)
+			ps->low.mclk = max_limits->mclk;
+		if (ps->low.sclk > max_limits->sclk)
+			ps->low.sclk = max_limits->sclk;
+		if (ps->low.vddc > max_limits->vddc)
+			ps->low.vddc = max_limits->vddc;
+		if (ps->low.vddci > max_limits->vddci)
+			ps->low.vddci = max_limits->vddci;
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	if (disable_mclk_switching) {
+		sclk = ps->low.sclk;
+		mclk = ps->high.mclk;
+		vddc = ps->low.vddc;
+		vddci = ps->high.vddci;
+	} else {
+		sclk = ps->low.sclk;
+		mclk = ps->low.mclk;
+		vddc = ps->low.vddc;
+		vddci = ps->low.vddci;
+	}
+
+	/* adjusted low state */
+	ps->low.sclk = sclk;
+	ps->low.mclk = mclk;
+	ps->low.vddc = vddc;
+	ps->low.vddci = vddci;
+
+	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
+				  &ps->low.sclk, &ps->low.mclk);
+
+	/* adjusted medium, high states */
+	if (ps->medium.sclk < ps->low.sclk)
+		ps->medium.sclk = ps->low.sclk;
+	if (ps->medium.vddc < ps->low.vddc)
+		ps->medium.vddc = ps->low.vddc;
+	if (ps->high.sclk < ps->medium.sclk)
+		ps->high.sclk = ps->medium.sclk;
+	if (ps->high.vddc < ps->medium.vddc)
+		ps->high.vddc = ps->medium.vddc;
+
+	if (disable_mclk_switching) {
+		mclk = ps->low.mclk;
+		if (mclk < ps->medium.mclk)
+			mclk = ps->medium.mclk;
+		if (mclk < ps->high.mclk)
+			mclk = ps->high.mclk;
+		ps->low.mclk = mclk;
+		ps->low.vddci = vddci;
+		ps->medium.mclk = mclk;
+		ps->medium.vddci = vddci;
+		ps->high.mclk = mclk;
+		ps->high.vddci = vddci;
+	} else {
+		if (ps->medium.mclk < ps->low.mclk)
+			ps->medium.mclk = ps->low.mclk;
+		if (ps->medium.vddci < ps->low.vddci)
+			ps->medium.vddci = ps->low.vddci;
+		if (ps->high.mclk < ps->medium.mclk)
+			ps->high.mclk = ps->medium.mclk;
+		if (ps->high.vddci < ps->medium.vddci)
+			ps->high.vddci = ps->medium.vddci;
+	}
+
+	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
+				  &ps->medium.sclk, &ps->medium.mclk);
+	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
+				  &ps->high.sclk, &ps->high.mclk);
+
+	btc_adjust_clock_combinations(rdev, max_limits, &ps->low);
+	btc_adjust_clock_combinations(rdev, max_limits, &ps->medium);
+	btc_adjust_clock_combinations(rdev, max_limits, &ps->high);
+
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+					   ps->low.sclk, max_limits->vddc, &ps->low.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+					   ps->low.mclk, max_limits->vddci, &ps->low.vddci);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+					   ps->low.mclk, max_limits->vddc, &ps->low.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+					   rdev->clock.current_dispclk, max_limits->vddc, &ps->low.vddc);
+
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+					   ps->medium.sclk, max_limits->vddc, &ps->medium.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+					   ps->medium.mclk, max_limits->vddci, &ps->medium.vddci);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+					   ps->medium.mclk, max_limits->vddc, &ps->medium.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+					   rdev->clock.current_dispclk, max_limits->vddc, &ps->medium.vddc);
+
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+					   ps->high.sclk, max_limits->vddc, &ps->high.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+					   ps->high.mclk, max_limits->vddci, &ps->high.vddci);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+					   ps->high.mclk, max_limits->vddc, &ps->high.vddc);
+	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+					   rdev->clock.current_dispclk, max_limits->vddc, &ps->high.vddc);
+
+	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
+				      &ps->low.vddc, &ps->low.vddci);
+	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
+				      &ps->medium.vddc, &ps->medium.vddci);
+	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
+				      &ps->high.vddc, &ps->high.vddci);
+
+	if ((ps->high.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
+	    (ps->medium.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
+	    (ps->low.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc))
+		ps->dc_compatible = true;
+	else
+		ps->dc_compatible = false;
+
+	if (ps->low.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
+		ps->low.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
+	if (ps->medium.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
+		ps->medium.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
+	if (ps->high.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
+		ps->high.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
+}
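+
+/*
+ * Ordering summary (descriptive): clamp each level to the AC/DC limits,
+ * pin mclk across all levels when mclk switching is unsafe, step past
+ * blacklisted sclk/mclk pairs, enforce the mclk:sclk combination rules,
+ * raise voltages per the dependency tables and the vddc/vddci delta,
+ * then derive the dc_compatible and PCIe gen2 flags.
+ */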
+
+static void btc_update_current_ps(struct radeon_device *rdev,
+				  struct radeon_ps *rps)
+{
+	struct rv7xx_ps *new_ps = rv770_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	eg_pi->current_rps = *rps;
+	eg_pi->current_ps = *new_ps;
+	eg_pi->current_rps.ps_priv = &eg_pi->current_ps;
+}
+
+static void btc_update_requested_ps(struct radeon_device *rdev,
+				    struct radeon_ps *rps)
+{
+	struct rv7xx_ps *new_ps = rv770_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	eg_pi->requested_rps = *rps;
+	eg_pi->requested_ps = *new_ps;
+	eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps;
+}
+
+#if 0
+void btc_dpm_reset_asic(struct radeon_device *rdev)
+{
+	rv770_restrict_performance_levels_before_switch(rdev);
+	btc_disable_ulv(rdev);
+	btc_set_boot_state_timing(rdev);
+	rv770_set_boot_state(rdev);
+}
+#endif
+
+int btc_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	btc_update_requested_ps(rdev, new_ps);
+
+	btc_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
+
+	return 0;
+}
+
+int btc_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+	struct radeon_ps *old_ps = &eg_pi->current_rps;
+	int ret;
+
+	btc_disable_ulv(rdev);
+	btc_set_boot_state_timing(rdev);
+	ret = rv770_restrict_performance_levels_before_switch(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	if (eg_pi->pcie_performance_request)
+		cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+
+	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	ret = rv770_halt_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_halt_smc failed\n");
+		return ret;
+	}
+	btc_set_at_for_uvd(rdev, new_ps);
+	if (eg_pi->smu_uvd_hs)
+		btc_notify_uvd_to_smc(rdev, new_ps);
+	ret = cypress_upload_sw_state(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("cypress_upload_sw_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = cypress_upload_mc_reg_table(rdev, new_ps);
+		if (ret) {
+			DRM_ERROR("cypress_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+
+	cypress_program_memory_timing_parameters(rdev, new_ps);
+
+	ret = rv770_resume_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_resume_smc failed\n");
+		return ret;
+	}
+	ret = rv770_set_sw_state(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_set_sw_state failed\n");
+		return ret;
+	}
+	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	if (eg_pi->pcie_performance_request)
+		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+	ret = btc_set_power_state_conditionally_enable_ulv(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("btc_set_power_state_conditionally_enable_ulv failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void btc_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+
+	btc_update_current_ps(rdev, new_ps);
+}
+
+int btc_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	if (pi->gfx_clock_gating)
+		btc_cg_clock_gating_default(rdev);
+
+	if (btc_dpm_enabled(rdev))
+		return -EINVAL;
+
+	if (pi->mg_clock_gating)
+		btc_mg_clock_gating_default(rdev);
+
+	if (eg_pi->ls_clock_gating)
+		btc_ls_clock_gating_default(rdev);
+
+	if (pi->voltage_control) {
+		rv770_enable_voltage_control(rdev, true);
+		ret = cypress_construct_voltage_tables(rdev);
+		if (ret) {
+			DRM_ERROR("cypress_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+
+	if (pi->mvdd_control) {
+		ret = cypress_get_mvdd_configuration(rdev);
+		if (ret) {
+			DRM_ERROR("cypress_get_mvdd_configuration failed\n");
+			return ret;
+		}
+	}
+
+	if (eg_pi->dynamic_ac_timing) {
+		ret = btc_initialize_mc_reg_table(rdev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv770_enable_backbias(rdev, true);
+
+	if (pi->dynamic_ss)
+		cypress_enable_spread_spectrum(rdev, true);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, true);
+
+	rv770_setup_bsp(rdev);
+	rv770_program_git(rdev);
+	rv770_program_tp(rdev);
+	rv770_program_tpp(rdev);
+	rv770_program_sstp(rdev);
+	rv770_program_engine_speed_parameters(rdev);
+	cypress_enable_display_gap(rdev);
+	rv770_program_vc(rdev);
+
+	if (pi->dynamic_pcie_gen2)
+		btc_enable_dynamic_pcie_gen2(rdev, true);
+
+	ret = rv770_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_upload_firmware failed\n");
+		return ret;
+	}
+	ret = cypress_get_table_locations(rdev);
+	if (ret) {
+		DRM_ERROR("cypress_get_table_locations failed\n");
+		return ret;
+	}
+	ret = btc_init_smc_table(rdev, boot_ps);
+	if (ret)
+		return ret;
+
+	if (eg_pi->dynamic_ac_timing) {
+		ret = cypress_populate_mc_reg_table(rdev, boot_ps);
+		if (ret) {
+			DRM_ERROR("cypress_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+
+	cypress_program_response_times(rdev);
+	r7xx_start_smc(rdev);
+	ret = cypress_notify_smc_display_change(rdev, false);
+	if (ret) {
+		DRM_ERROR("cypress_notify_smc_display_change failed\n");
+		return ret;
+	}
+	cypress_enable_sclk_control(rdev, true);
+
+	if (eg_pi->memory_transition)
+		cypress_enable_mclk_control(rdev, true);
+
+	cypress_start_dpm(rdev);
+
+	if (pi->gfx_clock_gating)
+		btc_cg_clock_gating_enable(rdev, true);
+
+	if (pi->mg_clock_gating)
+		btc_mg_clock_gating_enable(rdev, true);
+
+	if (eg_pi->ls_clock_gating)
+		btc_ls_clock_gating_enable(rdev, true);
+
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	btc_init_stutter_mode(rdev);
+
+	btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+void btc_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (!btc_dpm_enabled(rdev))
+		return;
+
+	rv770_clear_vc(rdev);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, false);
+
+	if (pi->dynamic_pcie_gen2)
+		btc_enable_dynamic_pcie_gen2(rdev, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	if (pi->gfx_clock_gating)
+		btc_cg_clock_gating_enable(rdev, false);
+
+	if (pi->mg_clock_gating)
+		btc_mg_clock_gating_enable(rdev, false);
+
+	if (eg_pi->ls_clock_gating)
+		btc_ls_clock_gating_enable(rdev, false);
+
+	rv770_stop_dpm(rdev);
+	btc_reset_to_default(rdev);
+	btc_stop_smc(rdev);
+	cypress_enable_spread_spectrum(rdev, false);
+
+	btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+void btc_dpm_setup_asic(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int r;
+
+	r = ni_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
+	rv770_get_memory_type(rdev);
+	rv740_read_clock_registers(rdev);
+	btc_read_arb_registers(rdev);
+	rv770_read_voltage_smio_registers(rdev);
+
+	if (eg_pi->pcie_performance_request)
+		cypress_advertise_gen2_capability(rdev);
+
+	rv770_get_pcie_gen2_status(rdev);
+	rv770_enable_acpi_pm(rdev);
+}
+
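+/*
+ * One-time DPM initialization: allocate the driver power-management state,
+ * parse the VBIOS power tables and fill in the BTC (Barts/Turks/Caicos)
+ * defaults and feature flags consumed by the enable path above.
+ */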
+int btc_dpm_init(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
+	if (eg_pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = eg_pi;
+	pi = &eg_pi->rv7xx;
+
+	rv770_get_max_vddc(rdev);
+
+	eg_pi->ulv.supported = false;
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = rv7xx_parse_power_table(rdev);
+	if (ret)
+		return ret;
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret)
+		return ret;
+
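+	/*
+	 * Synthesize a fixed four-entry VDDC-vs-display-clock table; clocks
+	 * are in 10 kHz units and voltages in mV, so the entries below read
+	 * as 360/540/720 MHz at 800 mV (a conservative floor rather than
+	 * anything parsed from the VBIOS).
+	 */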
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kcalloc(4,
+			sizeof(struct radeon_clock_voltage_dependency_entry),
+			GFP_KERNEL);
+	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		r600_free_extended_power_table(rdev);
+		return -ENOMEM;
+	}
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 800;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 800;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 800;
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = dividers.ref_div + 1;
+	else
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	pi->mclk_edc_enable_threshold = 40000;
+	eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+	pi->rlp = RV770_RLP_DFLT;
+	pi->rmp = RV770_RMP_DFLT;
+	pi->lhp = RV770_LHP_DFLT;
+	pi->lmp = RV770_LMP_DFLT;
+
+	eg_pi->ats[0].rlp = RV770_RLP_DFLT;
+	eg_pi->ats[0].rmp = RV770_RMP_DFLT;
+	eg_pi->ats[0].lhp = RV770_LHP_DFLT;
+	eg_pi->ats[0].lmp = RV770_LMP_DFLT;
+
+	eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
+	eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
+	eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
+	eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
+
+	eg_pi->smu_uvd_hs = true;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
+
+	pi->mvdd_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
+
+	eg_pi->vddci_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
+
+	rv770_get_engine_memory_ss(rdev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = CYPRESS_VRC_DFLT;
+
+	pi->power_gating = false;
+
+	pi->gfx_clock_gating = true;
+
+	pi->mg_clock_gating = true;
+	pi->mgcgtssm = true;
+	eg_pi->ls_clock_gating = false;
+	eg_pi->sclk_deep_sleep = false;
+
+	pi->dynamic_pcie_gen2 = true;
+
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	pi->display_gap = true;
+
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		pi->dcodt = true;
+	else
+		pi->dcodt = false;
+
+	pi->ulps = true;
+
+	eg_pi->dynamic_ac_timing = true;
+	eg_pi->abm = true;
+	eg_pi->mcls = true;
+	eg_pi->light_sleep = true;
+	eg_pi->memory_transition = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	if (rdev->family == CHIP_BARTS)
+		eg_pi->dll_default_on = true;
+	else
+		eg_pi->dll_default_on = false;
+
+	if (ASIC_IS_LOMBOK(rdev))
+		pi->mclk_stutter_mode_threshold = 30000;
+	else
+		pi->mclk_stutter_mode_threshold = 0;
+
+	pi->sram_end = SMC_RAM_END;
+
+	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+	rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
+	rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
+	rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	if (rdev->family == CHIP_TURKS)
+		rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	else
+		rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
+
+	/* make sure dc limits are valid */
+	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	return 0;
+}
+
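+/*
+ * Free everything btc_dpm_init() allocated: the per-state private data,
+ * the power-state array, the driver private struct and the synthesized
+ * dispclk table.
+ */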
+void btc_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	r600_free_extended_power_table(rdev);
+}
+
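+/*
+ * The SMC reports the active performance level through
+ * TARGET_AND_CURRENT_PROFILE_INDEX (0 = low, 1 = medium, 2 = high); the
+ * helpers below decode it for debugfs and the current-clock queries.
+ */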
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						     struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	}
+}
+
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->sclk;
+	}
+}
+
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->mclk;
+	}
+}
+
+u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->low.sclk;
+	else
+		return requested_state->high.sclk;
+}
+
+u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->low.mclk;
+	else
+		return requested_state->high.mclk;
+}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
new file mode 100644
index 0000000..3b6f12b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __BTC_DPM_H__
+#define __BTC_DPM_H__
+
+#define BTC_RLP_UVD_DFLT                              20
+#define BTC_RMP_UVD_DFLT                              50
+#define BTC_LHP_UVD_DFLT                              50
+#define BTC_LMP_UVD_DFLT                              20
+#define BARTS_MGCGCGTSSMCTRL_DFLT                     0x81944000
+#define TURKS_MGCGCGTSSMCTRL_DFLT                     0x6e944000
+#define CAICOS_MGCGCGTSSMCTRL_DFLT                    0x46944040
+#define BTC_CGULVPARAMETER_DFLT                       0x00040035
+#define BTC_CGULVCONTROL_DFLT                         0x00001450
+
+extern u32 btc_valid_sclk[40];
+
+void btc_read_arb_registers(struct radeon_device *rdev);
+void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
+				  const u32 *sequence, u32 count);
+void btc_skip_blacklist_clocks(struct radeon_device *rdev,
+			       const u32 max_sclk, const u32 max_mclk,
+			       u32 *sclk, u32 *mclk);
+void btc_adjust_clock_combinations(struct radeon_device *rdev,
+				   const struct radeon_clock_and_voltage_limits *max_limits,
+				   struct rv7xx_pl *pl);
+void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
+					u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock);
+void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
+				   u16 max_vddc, u16 max_vddci,
+				   u16 *vddc, u16 *vddci);
+bool btc_dpm_enabled(struct radeon_device *rdev);
+int btc_reset_to_default(struct radeon_device *rdev);
+void btc_notify_uvd_to_smc(struct radeon_device *rdev,
+			   struct radeon_ps *radeon_new_state);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
new file mode 100644
index 0000000..9c65be2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef _BTCD_H_
+#define _BTCD_H_
+
+/* pm registers */
+
+#define GENERAL_PWRMGT                                  0x63c
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define ENABLE_GEN2PCIE                          (1 << 4)
+#       define ENABLE_GEN2XSP                           (1 << 5)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (3 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define LOW_VOLT_D2_ACPI                         (1 << 8)
+#       define LOW_VOLT_D3_ACPI                         (1 << 9)
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define BACKBIAS_PAD_EN                          (1 << 18)
+#       define BACKBIAS_VALUE                           (1 << 19)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#       define AC_DC_SW                                 (1 << 24)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x66c
+#       define CURRENT_PROFILE_INDEX_MASK                 (0xf << 4)
+#       define CURRENT_PROFILE_INDEX_SHIFT                4
+
+#define	CG_BIF_REQ_AND_RSP				0x7f4
+#define		CG_CLIENT_REQ(x)			((x) << 0)
+#define		CG_CLIENT_REQ_MASK			(0xff << 0)
+#define		CG_CLIENT_REQ_SHIFT			0
+#define		CG_CLIENT_RESP(x)			((x) << 8)
+#define		CG_CLIENT_RESP_MASK			(0xff << 8)
+#define		CG_CLIENT_RESP_SHIFT			8
+#define		CLIENT_CG_REQ(x)			((x) << 16)
+#define		CLIENT_CG_REQ_MASK			(0xff << 16)
+#define		CLIENT_CG_REQ_SHIFT			16
+#define		CLIENT_CG_RESP(x)			((x) << 24)
+#define		CLIENT_CG_RESP_MASK			(0xff << 24)
+#define		CLIENT_CG_RESP_SHIFT			24
+
+#define	SCLK_PSKIP_CNTL					0x8c0
+#define		PSKIP_ON_ALLOW_STOP_HI(x)		((x) << 16)
+#define		PSKIP_ON_ALLOW_STOP_HI_MASK		(0xff << 16)
+#define		PSKIP_ON_ALLOW_STOP_HI_SHIFT		16
+
+#define	CG_ULV_CONTROL					0x8c8
+#define	CG_ULV_PARAMETER				0x8cc
+
+#define	MC_ARB_DRAM_TIMING				0x2774
+#define	MC_ARB_DRAM_TIMING2				0x2778
+
+#define	MC_ARB_RFSH_RATE				0x27b0
+#define		POWERMODE0(x)				((x) << 0)
+#define		POWERMODE0_MASK				(0xff << 0)
+#define		POWERMODE0_SHIFT			0
+#define		POWERMODE1(x)				((x) << 8)
+#define		POWERMODE1_MASK				(0xff << 8)
+#define		POWERMODE1_SHIFT			8
+#define		POWERMODE2(x)				((x) << 16)
+#define		POWERMODE2_MASK				(0xff << 16)
+#define		POWERMODE2_SHIFT			16
+#define		POWERMODE3(x)				((x) << 24)
+#define		POWERMODE3_MASK				(0xff << 24)
+#define		POWERMODE3_SHIFT			24
+
+#define MC_ARB_BURST_TIME                               0x2808
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define MC_SEQ_RAS_TIMING                               0x28a0
+#define MC_SEQ_CAS_TIMING                               0x28a4
+#define MC_SEQ_MISC_TIMING                              0x28a8
+#define MC_SEQ_MISC_TIMING2                             0x28ac
+
+#define MC_SEQ_RD_CTL_D0                                0x28b4
+#define MC_SEQ_RD_CTL_D1                                0x28b8
+#define MC_SEQ_WR_CTL_D0                                0x28bc
+#define MC_SEQ_WR_CTL_D1                                0x28c0
+
+#define MC_PMG_AUTO_CFG                                 0x28d4
+
+#define MC_SEQ_STATUS_M                                 0x29f4
+#       define PMG_PWRSTATE                             (1 << 16)
+
+#define MC_SEQ_MISC0                                    0x2a00
+#define         MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define         MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define         MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0x2a04
+#define MC_SEQ_RESERVE_M                                0x2a08
+#define MC_PMG_CMD_EMRS                                 0x2a0c
+
+#define MC_SEQ_MISC3                                    0x2a2c
+
+#define MC_SEQ_MISC5                                    0x2a54
+#define MC_SEQ_MISC6                                    0x2a58
+
+#define MC_SEQ_MISC7                                    0x2a64
+
+#define MC_SEQ_CG                                       0x2a68
+#define		CG_SEQ_REQ(x)				((x) << 0)
+#define		CG_SEQ_REQ_MASK				(0xff << 0)
+#define		CG_SEQ_REQ_SHIFT			0
+#define		CG_SEQ_RESP(x)				((x) << 8)
+#define		CG_SEQ_RESP_MASK			(0xff << 8)
+#define		CG_SEQ_RESP_SHIFT			8
+#define		SEQ_CG_REQ(x)				((x) << 16)
+#define		SEQ_CG_REQ_MASK				(0xff << 16)
+#define		SEQ_CG_REQ_SHIFT			16
+#define		SEQ_CG_RESP(x)				((x) << 24)
+#define		SEQ_CG_RESP_MASK			(0xff << 24)
+#define		SEQ_CG_RESP_SHIFT			24
+#define MC_SEQ_RAS_TIMING_LP                            0x2a6c
+#define MC_SEQ_CAS_TIMING_LP                            0x2a70
+#define MC_SEQ_MISC_TIMING_LP                           0x2a74
+#define MC_SEQ_MISC_TIMING2_LP                          0x2a78
+#define MC_SEQ_WR_CTL_D0_LP                             0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP                             0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP                           0x2a88
+
+#define MC_PMG_CMD_MRS                                  0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP                             0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP                             0x2b20
+
+#define MC_PMG_CMD_MRS1                                 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0x2b48
+
+#define	LB_SYNC_RESET_SEL				0x6b28
+#define		LB_SYNC_RESET_SEL_MASK			(3 << 0)
+#define		LB_SYNC_RESET_SEL_SHIFT			0
+
+/* PCIE link stuff */
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 0000000..9fec4d0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * Cayman (evergreen-class) cards need to use the 3D engine to blit data,
+ * which requires quite a bit of hw state setup.  Rather than pull the
+ * whole 3D driver (which normally generates the 3D state) into the DRM,
+ * we opt to use statically generated state tables.  The register state
+ * and shaders were hand-generated to support blitting functionality.
+ * See the 3D driver or documentation for descriptions of the registers
+ * and shader instructions.
+ */
+
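+/*
+ * The table below is a raw PM4 command stream.  Each 0xc0nn6900 dword is
+ * a type-3 SET_CONTEXT_REG packet header, built as
+ * (3 << 30) | (count << 16) | (0x69 << 8); the following dword is the
+ * register offset relative to the context-register base, i.e.
+ * (reg - 0x28000) >> 2, and then <count> register values.  For example
+ * 0xc0066900, 0x00000000 writes six registers starting at
+ * DB_RENDER_CONTROL (0x28000).
+ */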
+const u32 cayman_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x00000187,
+	0x00000100, /* SPI_VS_OUT_ID_0 */
+
+	0xc0026900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+	0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0106900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+	0x00000000, /* SPI_GPR_MGMT */
+	0x00000000, /* SPI_LDS_MGMT */
+	0x00000000, /* SPI_STACK_MGMT */
+	0x00000000, /* SPI_WAVE_MGMT_1 */
+	0x00000000, /* SPI_WAVE_MGMT_2 */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+	0x00000000,
+
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 0000000..f5d0e9a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_ps_size, cayman_vs_size;
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 0000000..d587779
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,6003 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_ucode.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "ci_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1    625
+#define VOLTAGE_VID_OFFSET_SCALE2    100
+
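+/*
+ * Per-SKU powertune defaults, selected by PCI device ID in
+ * ci_initialize_powertune_defaults() below.  Judging from how the fields
+ * are consumed in this file, the scalars cover the SVI load line, TDC
+ * throttle limits, waterfall control, DTE ambient temperature base and
+ * BAPM temperature gradient, while the two 15-entry arrays are the
+ * BAPMTI_R/BAPMTI_RC thermal-coupling tables copied into the SMC dpm
+ * table.
+ */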
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
+	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
+	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
+	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
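+/*
+ * DIDT (di/dt current-ramp limiter) programming list consumed by
+ * ci_program_pt_config_registers(); each row is
+ * { offset, mask, shift, value, address space }, terminated by an offset
+ * of 0xFFFFFFFF.
+ */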
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0xFFFFFFFF }
+};
+
+extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+				       u32 arb_freq_src, u32 arb_freq_dest);
+extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
+extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
+extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+						     u32 max_voltage_steps,
+						     struct atom_voltage_table *voltage_table);
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern int ci_mc_load_microcode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+			  u32 block, bool enable);
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+					 struct atom_voltage_table_entry *voltage_table,
+					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+				       u32 target_tdp);
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+						      PPSMC_Msg msg, u32 parameter);
+
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
+
+static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
+{
+	struct ci_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	switch (rdev->pdev->device) {
+	case 0x6649:
+	case 0x6650:
+	case 0x6651:
+	case 0x6658:
+	case 0x665C:
+	case 0x665D:
+	default:
+		pi->powertune_defaults = &defaults_bonaire_xt;
+		break;
+	case 0x6640:
+	case 0x6641:
+	case 0x6646:
+	case 0x6647:
+		pi->powertune_defaults = &defaults_saturn_xt;
+		break;
+	case 0x67B8:
+	case 0x67B0:
+		pi->powertune_defaults = &defaults_hawaii_xt;
+		break;
+	case 0x67BA:
+	case 0x67B1:
+		pi->powertune_defaults = &defaults_hawaii_pro;
+		break;
+	case 0x67A0:
+	case 0x67A1:
+	case 0x67A2:
+	case 0x67A8:
+	case 0x67A9:
+	case 0x67AA:
+	case 0x67B9:
+	case 0x67BE:
+		pi->powertune_defaults = &defaults_bonaire_xt;
+		break;
+	}
+
+	pi->dte_tj_offset = 0;
+
+	pi->caps_power_containment = true;
+	pi->caps_cac = false;
+	pi->caps_sq_ramping = false;
+	pi->caps_db_ramping = false;
+	pi->caps_td_ramping = false;
+	pi->caps_tcp_ramping = false;
+
+	if (pi->caps_power_containment) {
+		pi->caps_cac = true;
+		if (rdev->family == CHIP_HAWAII)
+			pi->enable_bapm_feature = false;
+		else
+			pi->enable_bapm_feature = true;
+		pi->enable_tdc_limit_feature = true;
+		pi->enable_pkg_pwr_tracking_feature = true;
+	}
+}
+
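+/*
+ * Convert a voltage in mV to the SMC's VID encoding: an 8-bit VID counts
+ * 6.25 mV steps down from 1.55 V, i.e. VID = (1550 - mV) / 6.25, computed
+ * here in fixed point as (6200 - 4 * mV) / 25.  For example 1000 mV gives
+ * (6200 - 4000) / 25 = 88.
+ */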
+static u8 ci_convert_to_vid(u16 vddc)
+{
+	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
+
+static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+	u32 i;
+
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+		return -EINVAL;
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+		return -EINVAL;
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
+	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+		return -EINVAL;
+
+	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+		} else {
+			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+		}
+	}
+	return 0;
+}
+
+static int ci_populate_vddc_vid(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u8 *vid = pi->smc_powertune_table.VddCVid;
+	u32 i;
+
+	if (pi->vddc_voltage_table.count > 8)
+		return -EINVAL;
+
+	for (i = 0; i < pi->vddc_voltage_table.count; i++)
+		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+	return 0;
+}
+
+static int ci_populate_svi_load_line(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int ci_populate_tdc_limit(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	u16 tdc_limit;
+
+	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+		pt_defaults->tdc_vddc_throttle_release_limit_perc;
+	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int ci_populate_dw8(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	int ret;
+
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
+				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+				     pi->sram_end);
+	if (ret)
+		return -EINVAL;
+
+	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
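+	/*
+	 * A sensitivity of zero, or one with bit 15 set (a negative value),
+	 * is treated as unset; fall back to the platform default before
+	 * handing it to the SMC.
+	 */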
+	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
+		rdev->pm.dpm.fan.fan_output_sensitivity =
+			rdev->pm.dpm.fan.default_fan_output_sensitivity;
+
+	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
+
+	return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+	int i, min, max;
+
+	min = max = hi_vid[0];
+	for (i = 0; i < 8; i++) {
+		if (0 != hi_vid[i]) {
+			if (min > hi_vid[i])
+				min = hi_vid[i];
+			if (max < hi_vid[i])
+				max = hi_vid[i];
+		}
+
+		if (0 != lo_vid[i]) {
+			if (min > lo_vid[i])
+				min = lo_vid[i];
+			if (max < lo_vid[i])
+				max = lo_vid[i];
+		}
+	}
+
+	if ((min == 0) || (max == 0))
+		return -EINVAL;
+	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+	return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u16 hi_sidd, lo_sidd;
+	struct radeon_cac_tdp_table *cac_tdp_table =
+		rdev->pm.dpm.dyn_state.cac_tdp_table;
+
+	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+	return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
+	struct radeon_cac_tdp_table *cac_tdp_table =
+		rdev->pm.dpm.dyn_state.cac_tdp_table;
+	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
+	int i, j, k;
+	const u16 *def1;
+	const u16 *def2;
+
+	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+	dpm_table->GpuTjMax =
+		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+	if (ppm) {
+		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+	} else {
+		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+	}
+
+	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+	def1 = pt_defaults->bapmti_r;
+	def2 = pt_defaults->bapmti_rc;
+
+	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU7_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+				def1++;
+				def2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int ci_populate_pm_base(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 pm_fuse_table_offset;
+	int ret;
+
+	if (pi->caps_power_containment) {
+		ret = ci_read_smc_sram_dword(rdev,
+					     SMU7_FIRMWARE_HEADER_LOCATION +
+					     offsetof(SMU7_Firmware_Header, PmFuseTable),
+					     &pm_fuse_table_offset, pi->sram_end);
+		if (ret)
+			return ret;
+		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_vddc_vid(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_svi_load_line(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_tdc_limit(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_dw8(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_fuzzy_fan(rdev);
+		if (ret)
+			return ret;
+		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
+		if (ret)
+			return ret;
+		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
+		if (ret)
+			return ret;
+		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
+					   (u8 *)&pi->smc_powertune_table,
+					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 data;
+
+	if (pi->caps_sq_ramping) {
+		data = RREG32_DIDT(DIDT_SQ_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_SQ_CTRL0, data);
+	}
+
+	if (pi->caps_db_ramping) {
+		data = RREG32_DIDT(DIDT_DB_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_DB_CTRL0, data);
+	}
+
+	if (pi->caps_td_ramping) {
+		data = RREG32_DIDT(DIDT_TD_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_TD_CTRL0, data);
+	}
+
+	if (pi->caps_tcp_ramping) {
+		data = RREG32_DIDT(DIDT_TCP_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_TCP_CTRL0, data);
+	}
+}
+
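+/*
+ * Walk a 0xFFFFFFFF-terminated fixup list.  CACHE-type entries only
+ * accumulate field bits; the accumulated value is OR'd into the next real
+ * register write and then cleared, so several rows can build up a single
+ * register value.
+ */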
+static int ci_program_pt_config_registers(struct radeon_device *rdev,
+					  const struct ci_pt_config_reg *cac_config_regs)
+{
+	const struct ci_pt_config_reg *config_regs = cac_config_regs;
+	u32 data;
+	u32 cache = 0;
+
+	if (config_regs == NULL)
+		return -EINVAL;
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+		} else {
+			switch (config_regs->type) {
+			case CISLANDS_CONFIGREG_SMC_IND:
+				data = RREG32_SMC(config_regs->offset);
+				break;
+			case CISLANDS_CONFIGREG_DIDT_IND:
+				data = RREG32_DIDT(config_regs->offset);
+				break;
+			default:
+				data = RREG32(config_regs->offset << 2);
+				break;
+			}
+
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			data |= cache;
+
+			switch (config_regs->type) {
+			case CISLANDS_CONFIGREG_SMC_IND:
+				WREG32_SMC(config_regs->offset, data);
+				break;
+			case CISLANDS_CONFIGREG_DIDT_IND:
+				WREG32_DIDT(config_regs->offset, data);
+				break;
+			default:
+				WREG32(config_regs->offset << 2, data);
+				break;
+			}
+			cache = 0;
+		}
+		config_regs++;
+	}
+	return 0;
+}
+
+static int ci_enable_didt(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+
+	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
+		cik_enter_rlc_safe_mode(rdev);
+
+		if (enable) {
+			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
+			if (ret) {
+				cik_exit_rlc_safe_mode(rdev);
+				return ret;
+			}
+		}
+
+		ci_do_enable_didt(rdev, enable);
+
+		cik_exit_rlc_safe_mode(rdev);
+	}
+
+	return 0;
+}
+
+static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (enable) {
+		pi->power_containment_features = 0;
+		if (pi->caps_power_containment) {
+			if (pi->enable_bapm_feature) {
+				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
+				if (smc_result != PPSMC_Result_OK)
+					ret = -EINVAL;
+				else
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+			}
+
+			if (pi->enable_tdc_limit_feature) {
+				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
+				if (smc_result != PPSMC_Result_OK)
+					ret = -EINVAL;
+				else
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+			}
+
+			if (pi->enable_pkg_pwr_tracking_feature) {
+				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+				} else {
+					struct radeon_cac_tdp_table *cac_tdp_table =
+						rdev->pm.dpm.dyn_state.cac_tdp_table;
+					u32 default_pwr_limit =
+						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+					ci_set_power_limit(rdev, default_pwr_limit);
+				}
+			}
+		}
+	} else {
+		if (pi->caps_power_containment && pi->power_containment_features) {
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
+
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
+
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
+			pi->power_containment_features = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (pi->caps_cac) {
+		if (enable) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+			if (smc_result != PPSMC_Result_OK) {
+				ret = -EINVAL;
+				pi->cac_enabled = false;
+			} else {
+				pi->cac_enabled = true;
+			}
+		} else if (pi->cac_enabled) {
+			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+			pi->cac_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
+					    bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result = PPSMC_Result_OK;
+
+	if (pi->thermal_sclk_dpm_enabled) {
+		if (enable)
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+		else
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+	}
+
+	if (smc_result == PPSMC_Result_OK)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int ci_power_control_set_level(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_cac_tdp_table *cac_tdp_table =
+		rdev->pm.dpm.dyn_state.cac_tdp_table;
+	s32 adjust_percent;
+	s32 target_tdp;
+	int ret = 0;
+	bool adjust_polarity = false; /* ??? */
+
+	if (pi->caps_power_containment) {
+		adjust_percent = adjust_polarity ?
+			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
+		target_tdp = ((100 + adjust_percent) *
+			      (s32)cac_tdp_table->configurable_tdp) / 100;
+
+		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
+	}
+
+	return ret;
+}
+
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (pi->uvd_power_gated == gate)
+		return;
+
+	pi->uvd_power_gated = gate;
+
+	ci_update_uvd_dpm(rdev, gate);
+}
+
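+/*
+ * A memory reclock has to complete inside the display blanking interval
+ * or it is visible as corruption; GDDR5 needs a longer window (450 vs
+ * 300, presumably microseconds as returned by r600_dpm_get_vblank_time()).
+ */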
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
+	return vblank_time < switch_limit;
+}
+
+static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
+					struct radeon_ps *rps)
+{
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching;
+	u32 sclk, mclk;
+	int i;
+
+	if (rps->vce_active) {
+		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
+	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+	    ci_dpm_vblank_too_short(rdev))
+		disable_mclk_switching = true;
+	else
+		disable_mclk_switching = false;
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+		pi->battery_state = true;
+	else
+		pi->battery_state = false;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (!rdev->pm.dpm.ac_power) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
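+	/* when mclk switching is disallowed, run every level at the
+	 * highest mclk in the state so the memory clock never needs to
+	 * change while a display is active
+	 */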
+	if (disable_mclk_switching) {
+		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+		sclk = ps->performance_levels[0].sclk;
+	} else {
+		mclk = ps->performance_levels[0].mclk;
+		sclk = ps->performance_levels[0].sclk;
+	}
+
+	if (rps->vce_active) {
+		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
+			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
+	}
+
+	ps->performance_levels[0].sclk = sclk;
+	ps->performance_levels[0].mclk = mclk;
+
+	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+	if (disable_mclk_switching) {
+		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+	} else {
+		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+	}
+}
+
+static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+	u32 tmp;
+
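+	/* clamp the requested range to what the digital thermal sensor
+	 * supports (0..255 degrees C); temperatures are tracked in
+	 * millidegrees while the registers take whole degrees
+	 */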
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	tmp = RREG32_SMC(CG_THERMAL_INT);
+	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
+	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
+		CI_DIG_THERM_INTL(low_temp / 1000);
+	WREG32_SMC(CG_THERMAL_INT, tmp);
+
+#if 0
+	/* XXX: need to figure out how to handle this properly */
+	tmp = RREG32_SMC(CG_THERMAL_CTRL);
+	tmp &= DIG_THERM_DPM_MASK;
+	tmp |= DIG_THERM_DPM(high_temp / 1000);
+	WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+static int ci_thermal_enable_alert(struct radeon_device *rdev,
+				   bool enable)
+{
+	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
+	PPSMC_Result result;
+
+	if (enable) {
+		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+		WREG32_SMC(CG_THERMAL_INT, thermal_int);
+		rdev->irq.dpm_thermal = false;
+		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	} else {
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+		WREG32_SMC(CG_THERMAL_INT, thermal_int);
+		rdev->irq.dpm_thermal = true;
+		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (pi->fan_ctrl_is_in_default_mode) {
+		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+		pi->fan_ctrl_default_mode = tmp;
+		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+		pi->t_min = tmp;
+		pi->fan_ctrl_is_in_default_mode = false;
+	}
+
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+	tmp |= TMIN(0);
+	WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+	tmp |= FDO_PWM_MODE(mode);
+	WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	u32 duty100;
+	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	u16 fdo_min, slope1, slope2;
+	u32 reference_clock, tmp;
+	int ret;
+	u64 tmp64;
+
+	if (!pi->fan_table_start) {
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0) {
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
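+	/* fan.pwm_min appears to be in 0.01% units; scale it into the
+	 * controller's 0..duty100 duty-cycle range
+	 */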
+	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (u16)tmp64;
+
+	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
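+	/* compute the two fan-curve slopes in the SMC's fixed-point
+	 * format; the +50 rounds to nearest on the divide by 100
+	 */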
+	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = radeon_get_xclk(rdev);
+
+	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+					       reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+	fan_table.TempSrc = (uint8_t)tmp;
+
+	ret = ci_copy_bytes_to_smc(rdev,
+				   pi->fan_table_start,
+				   (u8 *)(&fan_table),
+				   sizeof(fan_table),
+				   pi->sram_end);
+
+	if (ret) {
+		DRM_ERROR("Failed to load fan table to the SMC\n");
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+	}
+
+	return 0;
+}
+
+static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result ret;
+
+	if (pi->caps_od_fuzzy_fan_control_support) {
+		ret = ci_send_msg_to_smc_with_parameter(rdev,
+							PPSMC_StartFanControl,
+							FAN_CONTROL_FUZZY);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+		ret = ci_send_msg_to_smc_with_parameter(rdev,
+							PPSMC_MSG_SetFanPwmMax,
+							rdev->pm.dpm.fan.default_max_fan_pwm);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+	} else {
+		ret = ci_send_msg_to_smc_with_parameter(rdev,
+							PPSMC_StartFanControl,
+							FAN_CONTROL_TABLE);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	pi->fan_is_controlled_by_smc = true;
+	return 0;
+}
+
+static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+	PPSMC_Result ret;
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+	if (ret != PPSMC_Result_OK)
+		return -EINVAL;
+
+	pi->fan_is_controlled_by_smc = false;
+	return 0;
+}
+
+int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+					     u32 *speed)
+{
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (u32)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+					     u32 speed)
+{
+	u32 tmp;
+	u32 duty, duty100;
+	u64 tmp64;
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (pi->fan_is_controlled_by_smc)
+		return -EINVAL;
+
+	if (speed > 100)
+		return -EINVAL;
+
+	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (u32)tmp64;
+
+	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+	tmp |= FDO_STATIC_DUTY(duty);
+	WREG32_SMC(CG_FDO_CTRL0, tmp);
+
+	return 0;
+}
+
+void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (rdev->pm.dpm.fan.ucode_fan_control)
+			ci_fan_ctrl_stop_smc_fan_control(rdev);
+		ci_fan_ctrl_set_static_mode(rdev, mode);
+	} else {
+		/* restart auto-manage */
+		if (rdev->pm.dpm.fan.ucode_fan_control)
+			ci_thermal_start_smc_fan_control(rdev);
+		else
+			ci_fan_ctrl_set_default_mode(rdev);
+	}
+}
+
+u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (pi->fan_is_controlled_by_smc)
+		return 0;
+
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+					 u32 *speed)
+{
+	u32 tach_period;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (rdev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+	if (tach_period == 0)
+		return -ENOENT;
+
+	*speed = 60 * xclk * 10000 / tach_period;
+
+	return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+					 u32 speed)
+{
+	u32 tach_period, tmp;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (rdev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	if ((speed < rdev->pm.fan_min_rpm) ||
+	    (speed > rdev->pm.fan_max_rpm))
+		return -EINVAL;
+
+	if (rdev->pm.dpm.fan.ucode_fan_control)
+		ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+	tach_period = 60 * xclk * 10000 / (8 * speed);
+	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+	tmp |= TARGET_PERIOD(tach_period);
+	WREG32_SMC(CG_TACH_CTRL, tmp);
+
+	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+	return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (!pi->fan_ctrl_is_in_default_mode) {
+		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
+		WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+		tmp |= TMIN(pi->t_min);
+		WREG32_SMC(CG_FDO_CTRL2, tmp);
+		pi->fan_ctrl_is_in_default_mode = true;
+	}
+}
+
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+	if (rdev->pm.dpm.fan.ucode_fan_control) {
+		ci_fan_ctrl_start_smc_fan_control(rdev);
+		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+	}
+}
+
+static void ci_thermal_initialize(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (rdev->pm.fan_pulses_per_revolution) {
+		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+		WREG32_SMC(CG_TACH_CTRL, tmp);
+	}
+
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+	tmp |= TACH_PWM_RESP_RATE(0x28);
+	WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+	int ret;
+
+	ci_thermal_initialize(rdev);
+	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = ci_thermal_enable_alert(rdev, true);
+	if (ret)
+		return ret;
+	if (rdev->pm.dpm.fan.ucode_fan_control) {
+		ret = ci_thermal_setup_fan_table(rdev);
+		if (ret)
+			return ret;
+		ci_thermal_start_smc_fan_control(rdev);
+	}
+
+	return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+	if (!rdev->pm.no_fan)
+		ci_fan_ctrl_set_default_mode(rdev);
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct radeon_device *rdev,
+				     u16 reg_offset, u32 *value)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	return ci_read_smc_sram_dword(rdev,
+				      pi->soft_regs_start + reg_offset,
+				      value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct radeon_device *rdev,
+				      u16 reg_offset, u32 value)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	return ci_write_smc_sram_dword(rdev,
+				       pi->soft_regs_start + reg_offset,
+				       value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+	if (pi->caps_fps) {
+		u16 tmp;
+
+		tmp = 45;
+		table->FpsHighT = cpu_to_be16(tmp);
+
+		tmp = 30;
+		table->FpsLowT = cpu_to_be16(tmp);
+	}
+}
+
+static int ci_update_sclk_t(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret = 0;
+	u32 low_sclk_interrupt_t = 0;
+
+	if (pi->caps_sclk_throttle_low_notification) {
+		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+		ret = ci_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+					   (u8 *)&low_sclk_interrupt_t,
+					   sizeof(u32), pi->sram_end);
+
+	}
+
+	return ret;
+}
+
+static void ci_get_leakage_voltages(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u16 leakage_id, virtual_voltage_id;
+	u16 vddc, vddci;
+	int i;
+
+	pi->vddc_leakage.count = 0;
+	pi->vddci_leakage.count = 0;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+				continue;
+			if (vddc != 0 && vddc != virtual_voltage_id) {
+				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+				pi->vddc_leakage.count++;
+			}
+		}
+	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
+										 virtual_voltage_id,
+										 leakage_id) == 0) {
+				if (vddc != 0 && vddc != virtual_voltage_id) {
+					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+					pi->vddc_leakage.count++;
+				}
+				if (vddci != 0 && vddci != virtual_voltage_id) {
+					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+					pi->vddci_leakage.count++;
+				}
+			}
+		}
+	}
+}
+
+static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	bool want_thermal_protection;
+	enum radeon_dpm_event_src dpm_event_src;
+	u32 tmp;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+#if 0
+		/* XXX: need to figure out how to handle this properly */
+		tmp = RREG32_SMC(CG_THERMAL_CTRL);
+		tmp &= DPM_EVENT_SRC_MASK;
+		tmp |= DPM_EVENT_SRC(dpm_event_src);
+		WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+		tmp = RREG32_SMC(GENERAL_PWRMGT);
+		if (pi->thermal_protection)
+			tmp &= ~THERMAL_PROTECTION_DIS;
+		else
+			tmp |= THERMAL_PROTECTION_DIS;
+		WREG32_SMC(GENERAL_PWRMGT, tmp);
+	} else {
+		tmp = RREG32_SMC(GENERAL_PWRMGT);
+		tmp |= THERMAL_PROTECTION_DIS;
+		WREG32_SMC(GENERAL_PWRMGT, tmp);
+	}
+}
+
+static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
+					   enum radeon_dpm_auto_throttle_src source,
+					   bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
+{
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
+static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((!pi->sclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	if ((!pi->mclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	pi->need_update_smu7_dpm_table = 0;
+	return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+
+	if (enable) {
+		if (!pi->sclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+
+		if (!pi->mclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+
+			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
+
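+			/* enable the per-block (local) CAC controllers;
+			 * these magic values appear to come from AMD's
+			 * reference driver
+			 */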
+			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
+			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
+			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
+
+			udelay(10);
+
+			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
+			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
+			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
+		}
+	} else {
+		if (!pi->sclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+
+		if (!pi->mclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ci_start_dpm(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret;
+	u32 tmp;
+
+	tmp = RREG32_SMC(GENERAL_PWRMGT);
+	tmp |= GLOBAL_PWRMGT_EN;
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+	tmp |= DYNAMIC_PM_EN;
+	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
+
+	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+
+	ret = ci_enable_sclk_mclk_dpm(rdev, true);
+	if (ret)
+		return ret;
+
+	if (!pi->pcie_dpm_key_disabled) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((!pi->sclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	if ((!pi->mclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_stop_dpm(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret;
+	u32 tmp;
+
+	tmp = RREG32_SMC(GENERAL_PWRMGT);
+	tmp &= ~GLOBAL_PWRMGT_EN;
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+	tmp &= ~DYNAMIC_PM_EN;
+	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+	if (!pi->pcie_dpm_key_disabled) {
+		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	ret = ci_enable_sclk_mclk_dpm(rdev, false);
+	if (ret)
+		return ret;
+
+	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+	if (enable)
+		tmp &= ~SCLK_PWRMGT_OFF;
+	else
+		tmp |= SCLK_PWRMGT_OFF;
+	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
+					bool ac_power)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_cac_tdp_table *cac_tdp_table =
+		rdev->pm.dpm.dyn_state.cac_tdp_table;
+	u32 power_limit;
+
+	if (ac_power)
+		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+	else
+		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+	ci_set_power_limit(rdev, power_limit);
+
+	if (pi->caps_automatic_dc_transition) {
+		if (ac_power)
+			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
+		else
+			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
+	}
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+
+	if (!ci_is_smc_running(rdev))
+		return PPSMC_Result_Failed;
+
+	WREG32(SMC_MESSAGE_0, msg);
+
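+	/* poll SMC_RESP_0 for a non-zero acknowledgement from the SMC,
+	 * for up to usec_timeout microseconds
+	 */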
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(SMC_RESP_0);
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+	tmp = RREG32(SMC_RESP_0);
+
+	return (PPSMC_Result)tmp;
+}
+
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_MSG_ARG_0, parameter);
+	return ci_send_msg_to_smc(rdev, msg);
+}
+
+static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
+							PPSMC_Msg msg, u32 *parameter)
+{
+	PPSMC_Result smc_result;
+
+	smc_result = ci_send_msg_to_smc(rdev, msg);
+
+	if ((smc_result == PPSMC_Result_OK) && parameter)
+		*parameter = RREG32(SMC_MSG_ARG_0);
+
+	return smc_result;
+}
+
+static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (!pi->sclk_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (!pi->mclk_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (!pi->pcie_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+		PPSMC_Result smc_result =
+			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+				       u32 target_tdp)
+{
+	PPSMC_Result smc_result =
+		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+	return 0;
+}
+
+#if 0
+static int ci_set_boot_state(struct radeon_device *rdev)
+{
+	return ci_enable_sclk_mclk_dpm(rdev, false);
+}
+#endif
+
+static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
+{
+	u32 sclk_freq;
+	PPSMC_Result smc_result =
+		ci_send_msg_to_smc_return_parameter(rdev,
+						    PPSMC_MSG_API_GetSclkFrequency,
+						    &sclk_freq);
+	if (smc_result != PPSMC_Result_OK)
+		sclk_freq = 0;
+
+	return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
+{
+	u32 mclk_freq;
+	PPSMC_Result smc_result =
+		ci_send_msg_to_smc_return_parameter(rdev,
+						    PPSMC_MSG_API_GetMclkFrequency,
+						    &mclk_freq);
+	if (smc_result != PPSMC_Result_OK)
+		mclk_freq = 0;
+
+	return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct radeon_device *rdev)
+{
+	int i;
+
+	ci_program_jump_on_start(rdev);
+	ci_start_smc_clock(rdev);
+	ci_start_smc(rdev);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
+			break;
+	}
+}
+
+static void ci_dpm_stop_smc(struct radeon_device *rdev)
+{
+	ci_reset_smc(rdev);
+	ci_stop_smc_clock(rdev);
+}
+
+static int ci_process_firmware_header(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
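+	/* the firmware header in SMC SRAM holds the offsets of the
+	 * tables the driver needs to upload later; cache them here
+	 */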
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, DpmTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->dpm_table_start = tmp;
+
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, SoftRegisters),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->soft_regs_start = tmp;
+
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->mc_reg_table_start = tmp;
+
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, FanTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->fan_table_start = tmp;
+
+	ret = ci_read_smc_sram_dword(rdev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->arb_table_start = tmp;
+
+	return 0;
+}
+
+static void ci_read_clock_registers(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	pi->clock_registers.cg_spll_func_cntl =
+		RREG32_SMC(CG_SPLL_FUNC_CNTL);
+	pi->clock_registers.cg_spll_func_cntl_2 =
+		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
+	pi->clock_registers.cg_spll_func_cntl_3 =
+		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
+	pi->clock_registers.cg_spll_func_cntl_4 =
+		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
+	pi->clock_registers.cg_spll_spread_spectrum =
+		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+	pi->clock_registers.cg_spll_spread_spectrum_2 =
+		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
+	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct radeon_device *rdev,
+					 bool enable)
+{
+	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+	if (enable)
+		tmp &= ~THERMAL_PROTECTION_DIS;
+	else
+		tmp |= THERMAL_PROTECTION_DIS;
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+	tmp |= STATIC_PM_EN;
+
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct radeon_device *rdev)
+{
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	udelay(25000);
+
+	return 0;
+}
+
+static int ci_exit_ulp_state(struct radeon_device *rdev)
+{
+	int i;
+
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	udelay(7000);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SMC_RESP_0) == 1)
+			break;
+		udelay(1000);
+	}
+
+	return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct radeon_device *rdev,
+					bool has_display)
+{
+	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct radeon_device *rdev,
+				      bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (enable) {
+		if (pi->caps_sclk_ds) {
+			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+				return -EINVAL;
+		} else {
+			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	} else {
+		if (pi->caps_sclk_ds) {
+			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void ci_program_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+	u32 pre_vbi_time_in_us;
+	u32 frame_time_in_us;
+	u32 ref_clock = rdev->clock.spll.reference_freq;
+	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+
+	tmp &= ~DISP_GAP_MASK;
+	if (rdev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+
+	if (refresh_rate == 0)
+		refresh_rate = 60;
+	if (vblank_time == 0xffffffff)
+		vblank_time = 500;
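+	/* time from the start of the frame to just before vblank,
+	 * converted to reference-clock cycles (reference_freq is in
+	 * 10 kHz units, so ref_clock / 100 is cycles per microsecond);
+	 * the 200 us is presumably a safety margin
+	 */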
+	frame_time_in_us = 1000000 / refresh_rate;
+	pre_vbi_time_in_us =
+		frame_time_in_us - 200 - vblank_time;
+	tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
+	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
+}
+
+static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (enable) {
+		if (pi->caps_sclk_ss_support) {
+			tmp = RREG32_SMC(GENERAL_PWRMGT);
+			tmp |= DYN_SPREAD_SPECTRUM_EN;
+			WREG32_SMC(GENERAL_PWRMGT, tmp);
+		}
+	} else {
+		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+		tmp &= ~SSEN;
+		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
+
+		tmp = RREG32_SMC(GENERAL_PWRMGT);
+		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
+		WREG32_SMC(GENERAL_PWRMGT, tmp);
+	}
+}
+
+static void ci_program_sstp(struct radeon_device *rdev)
+{
+	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void ci_enable_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+
+	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
+	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
+	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
+	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
+	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
+	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
+	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
+	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+	WREG32_SMC(CG_FTV_0, 0);
+	WREG32_SMC(CG_FTV_1, 0);
+	WREG32_SMC(CG_FTV_2, 0);
+	WREG32_SMC(CG_FTV_3, 0);
+	WREG32_SMC(CG_FTV_4, 0);
+	WREG32_SMC(CG_FTV_5, 0);
+	WREG32_SMC(CG_FTV_6, 0);
+	WREG32_SMC(CG_FTV_7, 0);
+}
+
+static int ci_upload_firmware(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int i, ret;
+
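+	/* wait for the SMC boot sequence to complete before stopping
+	 * the SMC clock and resetting it for the upload
+	 */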
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
+			break;
+	}
+	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
+
+	ci_stop_smc_clock(rdev);
+	ci_reset_smc(rdev);
+
+	ret = ci_load_smc_ucode(rdev, pi->sram_end);
+
+	return ret;
+}
+
+static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
+				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+				     struct atom_voltage_table *voltage_table)
+{
+	u32 i;
+
+	if (voltage_dependency_table == NULL)
+		return -EINVAL;
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+
+	voltage_table->count = voltage_dependency_table->count;
+	for (i = 0; i < voltage_table->count; i++) {
+		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+static int ci_construct_voltage_tables(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+
+	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_GPIO_LUT,
+						    &pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(rdev,
+						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						&pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
+							 &pi->vddc_voltage_table);
+
+	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
+						    VOLTAGE_OBJ_GPIO_LUT,
+						    &pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(rdev,
+						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						&pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
+							 &pi->vddci_voltage_table);
+
+	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
+						    VOLTAGE_OBJ_GPIO_LUT,
+						    &pi->mvdd_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(rdev,
+						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+						&pi->mvdd_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
+							 &pi->mvdd_voltage_table);
+
+	return 0;
+}
+
+static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
+					  struct atom_voltage_table_entry *voltage_table,
+					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+	int ret;
+
+	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
+					    &smc_voltage_table->StdVoltageHiSidd,
+					    &smc_voltage_table->StdVoltageLoSidd);
+
+	if (ret) {
+		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+	smc_voltage_table->StdVoltageHiSidd =
+		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+	smc_voltage_table->StdVoltageLoSidd =
+		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	unsigned int count;
+
+	table->VddcLevelCount = pi->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		ci_populate_smc_voltage_table(rdev,
+					      &pi->vddc_voltage_table.entries[count],
+					      &table->VddcLevel[count]);
+
+		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->VddcLevel[count].Smio |=
+				pi->vddc_voltage_table.entries[count].smio_low;
+		else
+			table->VddcLevel[count].Smio = 0;
+	}
+	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
+				       SMU7_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	table->VddciLevelCount = pi->vddci_voltage_table.count;
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		ci_populate_smc_voltage_table(rdev,
+					      &pi->vddci_voltage_table.entries[count],
+					      &table->VddciLevel[count]);
+
+		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->VddciLevel[count].Smio |=
+				pi->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio = 0;
+	}
+	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	unsigned int count;
+
+	table->MvddLevelCount = pi->mvdd_voltage_table.count;
+	for (count = 0; count < table->MvddLevelCount; count++) {
+		ci_populate_smc_voltage_table(rdev,
+					      &pi->mvdd_voltage_table.entries[count],
+					      &table->MvddLevel[count]);
+
+		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->MvddLevel[count].Smio |=
+				pi->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio = 0;
+	}
+	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
+					  SMU7_Discrete_DpmTable *table)
+{
+	int ret;
+
+	ret = ci_populate_smc_vddc_table(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_vddci_table(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_mvdd_table(rdev, table);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+				  SMU7_Discrete_VoltageLevel *voltage)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i = 0;
+
+	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+
+		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+			return -EINVAL;
+	}
+
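+	/* note: -EINVAL is returned even when a voltage was found above;
+	 * the caller appears to key off a non-zero return here, so this
+	 * is not a stray error path
+	 */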
+	return -EINVAL;
+}
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+					 struct atom_voltage_table_entry *voltage_table,
+					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+	u16 v_index, idx;
+	bool voltage_found = false;
+	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+		return -EINVAL;
+
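+	/* derive the leakage-based "standard" voltages: first look for
+	 * an exact match in the sclk/vddc dependency table, then fall
+	 * back to the first entry at or above the requested voltage
+	 */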
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+			if (voltage_table->value ==
+			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+				voltage_found = true;
+				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+					idx = v_index;
+				else
+					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+				*std_voltage_lo_sidd =
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+				*std_voltage_hi_sidd =
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+				break;
+			}
+		}
+
+		if (!voltage_found) {
+			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+				if (voltage_table->value <=
+				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+					voltage_found = true;
+					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+						idx = v_index;
+					else
+						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+					*std_voltage_lo_sidd =
+						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+					*std_voltage_hi_sidd =
+						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
+						  const struct radeon_phase_shedding_limits_table *limits,
+						  u32 sclk,
+						  u32 *phase_shedding)
+{
+	unsigned int i;
+
+	*phase_shedding = 1;
+
+	for (i = 0; i < limits->count; i++) {
+		if (sclk < limits->entries[i].sclk) {
+			*phase_shedding = i;
+			break;
+		}
+	}
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
+						  const struct radeon_phase_shedding_limits_table *limits,
+						  u32 mclk,
+						  u32 *phase_shedding)
+{
+	unsigned int i;
+
+	*phase_shedding = 1;
+
+	for (i = 0; i < limits->count; i++) {
+		if (mclk < limits->entries[i].mclk) {
+			*phase_shedding = i;
+			break;
+		}
+	}
+}
+
+static int ci_init_arb_table_index(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
+				       tmp, pi->sram_end);
+}
+
+static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
+					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
+					 u32 clock, u32 *voltage)
+{
+	u32 i = 0;
+
+	if (allowed_clock_voltage_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+			*voltage = allowed_clock_voltage_table->entries[i].v;
+			return 0;
+		}
+	}
+
+	*voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+	return 0;
+}
+
+static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+					     u32 sclk, u32 min_sclk_in_sr)
+{
+	u32 i;
+	u32 tmp;
+	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+	if (sclk < min)
+		return 0;
+
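+	/* pick the largest divider that keeps sclk / (1 << i) at or
+	 * above the deep-sleep minimum
+	 */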
+	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+		tmp = sclk / (1 << i);
+		if (tmp >= min || i == 0)
+			break;
+	}
+
+	return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct radeon_device *rdev)
+{
+	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static void ci_register_patching_mc_arb(struct radeon_device *rdev,
+					const u32 engine_clock,
+					const u32 memory_clock,
+					u32 *dram_timing2)
+{
+	bool patch;
+	u32 tmp, tmp2;
+
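+	/* devices 0x67B0/0x67B1 are Hawaii boards; on those, parts with
+	 * a particular memory revision in MC_SEQ_MISC0 need bits 23:16
+	 * of DRAM timing2 recomputed for mid-range memory clocks
+	 */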
+	tmp = RREG32(MC_SEQ_MISC0);
+	patch = (tmp & 0x0000f00) == 0x300;
+
+	if (patch &&
+	    ((rdev->pdev->device == 0x67B0) ||
+	     (rdev->pdev->device == 0x67B1))) {
+		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		}
+	}
+}
+
+static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
+						u32 sclk,
+						u32 mclk,
+						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 burst_time;
+
+	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
+
+	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
+
+	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
+	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+	arb_regs->McArbBurstTime = (u8)burst_time;
+
+	return 0;
+}
+
+static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	SMU7_Discrete_MCArbDramTimingTable arb_regs;
+	u32 i, j;
+	int ret =  0;
+
+	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
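+	/* build one MC arbiter register set for every (sclk, mclk)
+	 * pair so the SMC can switch DRAM timings along with the
+	 * dpm level
+	 */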
+	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+			ret = ci_populate_memory_timing_parameters(rdev,
+								   pi->dpm_table.sclk_table.dpm_levels[i].value,
+								   pi->dpm_table.mclk_table.dpm_levels[j].value,
+								   &arb_regs.entries[i][j]);
+			if (ret)
+				break;
+		}
+	}
+
+	if (ret == 0)
+		ret = ci_copy_bytes_to_smc(rdev,
+					   pi->arb_table_start,
+					   (u8 *)&arb_regs,
+					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
+					   pi->sram_end);
+
+	return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (pi->need_update_smu7_dpm_table == 0)
+		return 0;
+
+	return ci_do_program_memory_timing_parameters(rdev);
+}
+
+static void ci_populate_smc_initial_state(struct radeon_device *rdev,
+					  struct radeon_ps *radeon_boot_state)
+{
+	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 level = 0;
+
+	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+		    boot_state->performance_levels[0].sclk) {
+			pi->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+		    boot_state->performance_levels[0].mclk) {
+			pi->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+}
+
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+	u32 i;
+	u32 mask_value = 0;
+
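+	/* walk the levels from highest to lowest so that, after all the
+	 * shifts, bit n of the mask corresponds to dpm level n
+	 */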
+	for (i = dpm_table->count; i > 0; i--) {
+		mask_value <<= 1;
+		if (dpm_table->dpm_levels[i-1].enabled)
+			mask_value |= 0x1;
+	}
+
+	return mask_value;
+}
+
+static void ci_populate_smc_link_level(struct radeon_device *rdev,
+				       SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 i;
+
+	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed =
+			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].DownT = cpu_to_be32(5);
+		table->LinkLevel[i].UpT = cpu_to_be32(30);
+	}
+
+	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
+static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->UvdLevelCount =
+		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].VclkFrequency =
+			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency =
+			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+		table->UvdLevel[count].MinVddc =
+			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->UvdLevel[count].MinVddcPhases = 1;
+
+		ret = radeon_atom_get_clock_dividers(rdev,
+						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+						     table->UvdLevel[count].VclkFrequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+		ret = radeon_atom_get_clock_dividers(rdev,
+						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+						     table->UvdLevel[count].DclkFrequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_vce_level(struct radeon_device *rdev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->VceLevelCount =
+		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency =
+			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+		table->VceLevel[count].MinVoltage =
+			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->VceLevel[count].MinPhases = 1;
+
+		ret = radeon_atom_get_clock_dividers(rdev,
+						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+						     table->VceLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_acp_level(struct radeon_device *rdev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->AcpLevelCount = (u8)
+		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+	for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency =
+			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+		table->AcpLevel[count].MinVoltage =
+			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+		table->AcpLevel[count].MinPhases = 1;
+
+		ret = radeon_atom_get_clock_dividers(rdev,
+						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+						     table->AcpLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_samu_level(struct radeon_device *rdev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->SamuLevelCount =
+		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		table->SamuLevel[count].Frequency =
+			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+		table->SamuLevel[count].MinVoltage =
+			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->SamuLevel[count].MinPhases = 1;
+
+		ret = radeon_atom_get_clock_dividers(rdev,
+						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+						     table->SamuLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
+static int ci_calculate_mclk_params(struct radeon_device *rdev,
+				    u32 memory_clock,
+				    SMU7_Discrete_MemoryLevel *mclk,
+				    bool strobe_mode,
+				    bool dll_state_on)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32  dll_cntl = pi->clock_registers.dll_cntl;
+	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
+	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
+	struct atom_mpll_param mpll_param;
+	int ret;
+
+	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
+	if (ret)
+		return ret;
+
+	mpll_func_cntl &= ~BWCTRL_MASK;
+	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+	if (pi->mem_gddr5) {
+		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+			YCLK_POST_DIV(mpll_param.post_div);
+	}
+
+	if (pi->caps_mclk_ss_support) {
+		struct radeon_atom_ss ss;
+		u32 freq_nom;
+		u32 tmp;
+		u32 reference_clock = rdev->clock.mpll.reference_freq;
+
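+		/* reconstruct the nominal pre-post-divider frequency,
+		 * including the DDR (2x) or QDR (4x) factor; the SS
+		 * lookup below is keyed on this frequency
+		 */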
+		if (mpll_param.qdr == 1)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
+		else
+			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
+
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+			u32 clks = reference_clock * 5 / ss.rate;
+			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clkv);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clks);
+		}
+	}
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	mclk->MclkFrequency = memory_clock;
+	mclk->MpllFuncCntl = mpll_func_cntl;
+	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+	mclk->DllCntl = dll_cntl;
+	mclk->MpllSs1 = mpll_ss1;
+	mclk->MpllSs2 = mpll_ss2;
+
+	return 0;
+}
+
+static int ci_populate_single_memory_level(struct radeon_device *rdev,
+					   u32 memory_clock,
+					   SMU7_Discrete_MemoryLevel *memory_level)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+	bool dll_state_on;
+
+	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(rdev,
+						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						    memory_clock, &memory_level->MinVddc);
+		if (ret)
+			return ret;
+	}
+
+	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(rdev,
+						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						    memory_clock, &memory_level->MinVddci);
+		if (ret)
+			return ret;
+	}
+
+	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(rdev,
+						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+						    memory_clock, &memory_level->MinMvdd);
+		if (ret)
+			return ret;
+	}
+
+	memory_level->MinVddcPhases = 1;
+
+	if (pi->vddc_phase_shed_control)
+		ci_populate_phase_value_based_on_mclk(rdev,
+						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						      memory_clock,
+						      &memory_level->MinVddcPhases);
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->UpH = 0;
+	memory_level->DownH = 100;
+	memory_level->VoltageDownH = 0;
+	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+	memory_level->StutterEnable = false;
+	memory_level->StrobeEnable = false;
+	memory_level->EdcReadEnable = false;
+	memory_level->EdcWriteEnable = false;
+	memory_level->RttEnable = false;
+
+	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
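+	/* enable mclk stutter only below the threshold, with UVD idle, display stutter on and at most two active crtcs */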
+	if (pi->mclk_stutter_mode_threshold &&
+	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+	    !pi->uvd_enabled &&
+	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+	    (rdev->pm.dpm.new_active_crtc_count <= 2))
+		memory_level->StutterEnable = true;
+
+	if (pi->mclk_strobe_mode_threshold &&
+	    (memory_clock <= pi->mclk_strobe_mode_threshold))
+		memory_level->StrobeEnable = true;
+
+	if (pi->mem_gddr5) {
+		memory_level->StrobeRatio =
+			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+		if (pi->mclk_edc_enable_threshold &&
+		    (memory_clock > pi->mclk_edc_enable_threshold))
+			memory_level->EdcReadEnable = true;
+
+		if (pi->mclk_edc_wr_enable_threshold &&
+		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
+			memory_level->EdcWriteEnable = true;
+
+		if (memory_level->StrobeEnable) {
+			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+			else
+				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
+		} else {
+			dll_state_on = pi->dll_default_on;
+		}
+	} else {
+		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
+	}
+
+	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+	if (ret)
+		return ret;
+
+	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+	return 0;
+}
+
+static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	SMU7_Discrete_VoltageLevel voltage_level;
+	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+	u32 dll_cntl = pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+	int ret;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc)
+		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+	else
+		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
+
+	ret = radeon_atom_get_clock_dividers(rdev,
+					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+					     table->ACPILevel.SclkFrequency, false, &dividers);
+	if (ret)
+		return ret;
+
+	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
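+	/* for the ACPI state, power down the SPLL and hold it in reset */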
+	spll_func_cntl &= ~SPLL_PWRON;
+	spll_func_cntl |= SPLL_RESET;
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		if (pi->acpi_vddci)
+			table->MemoryACPILevel.MinVddci =
+				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+		else
+			table->MemoryACPILevel.MinVddci =
+				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+	}
+
+	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd = 0;
+	else
+		table->MemoryACPILevel.MinMvdd =
+			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->MemoryACPILevel.MpllAdFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+	table->MemoryACPILevel.MpllDqFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+	table->MemoryACPILevel.MpllFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+	table->MemoryACPILevel.MpllFuncCntl_1 =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+	table->MemoryACPILevel.MpllFuncCntl_2 =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpH = 0;
+	table->MemoryACPILevel.DownH = 100;
+	table->MemoryACPILevel.VoltageDownH = 0;
+	table->MemoryACPILevel.ActivityLevel =
+		cpu_to_be16((u16)pi->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = false;
+	table->MemoryACPILevel.StrobeEnable = false;
+	table->MemoryACPILevel.EdcReadEnable = false;
+	table->MemoryACPILevel.EdcWriteEnable = false;
+	table->MemoryACPILevel.RttEnable = false;
+
+	return 0;
+}
+
+static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ulv_parm *ulv = &pi->ulv;
+
+	if (ulv->supported) {
+		if (enable)
+			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		else
+			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_populate_ulv_level(struct radeon_device *rdev,
+				 SMU7_Discrete_Ulv *state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	if (ulv_voltage == 0) {
+		pi->ulv.supported = false;
+		return 0;
+	}
+
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+			state->VddcOffset = 0;
+		else
+			state->VddcOffset =
+				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+	} else {
+		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+			state->VddcOffsetVid = 0;
+		else
+			state->VddcOffsetVid = (u8)
+				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+	}
+	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+	state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+	return 0;
+}
+
+static int ci_calculate_sclk_params(struct radeon_device *rdev,
+				    u32 engine_clock,
+				    SMU7_Discrete_GraphicsLevel *sclk)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev,
+					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+	fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
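+	/* apply engine clock spread spectrum from the vbios SS table, if supported */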
+	if (pi->caps_sclk_ss_support) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->SclkFrequency = engine_clock;
+	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+	sclk->SclkDid = (u8)dividers.post_divider;
+
+	return 0;
+}
+
+static int ci_populate_single_graphic_level(struct radeon_device *rdev,
+					    u32 engine_clock,
+					    u16 sclk_activity_level_t,
+					    SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+
+	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
+	if (ret)
+		return ret;
+
+	ret = ci_get_dependency_volt_by_clk(rdev,
+					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+					    engine_clock, &graphic_level->MinVddc);
+	if (ret)
+		return ret;
+
+	graphic_level->SclkFrequency = engine_clock;
+
+	graphic_level->Flags =  0;
+	graphic_level->MinVddcPhases = 1;
+
+	if (pi->vddc_phase_shed_control)
+		ci_populate_phase_value_based_on_sclk(rdev,
+						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						      engine_clock,
+						      &graphic_level->MinVddcPhases);
+
+	graphic_level->ActivityLevel = sclk_activity_level_t;
+
+	graphic_level->CcPwrDynRm = 0;
+	graphic_level->CcPwrDynRm1 = 0;
+	graphic_level->EnabledForThrottle = 1;
+	graphic_level->UpH = 0;
+	graphic_level->DownH = 0;
+	graphic_level->VoltageDownH = 0;
+	graphic_level->PowerThrottle = 0;
+
+	if (pi->caps_sclk_ds)
+		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
+										   engine_clock,
+										   CISLAND_MINIMUM_ENGINE_CLOCK);
+
+	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+
+	return 0;
+}
+
+static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 level_array_address = pi->dpm_table_start +
+		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+		SMU7_MAX_LEVELS_GRAPHICS;
+	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+	u32 i;
+	int ret;
+
+	memset(levels, 0, level_array_size);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		ret = ci_populate_single_graphic_level(rdev,
+						       dpm_table->sclk_table.dpm_levels[i].value,
+						       (u16)pi->activity_target[i],
+						       &pi->smc_state_table.GraphicsLevel[i]);
+		if (ret)
+			return ret;
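+		/* only the two lowest sclk levels keep a deep sleep divider */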
+		if (i > 1)
+			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+		if (i == (dpm_table->sclk_table.count - 1))
+			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+				PPSMC_DISPLAY_WATERMARK_HIGH;
+	}
+	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+				   (u8 *)levels, level_array_size,
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ci_populate_ulv_state(struct radeon_device *rdev,
+				 SMU7_Discrete_Ulv *ulv_level)
+{
+	return ci_populate_ulv_level(rdev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 level_array_address = pi->dpm_table_start +
+		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+		SMU7_MAX_LEVELS_MEMORY;
+	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+	u32 i;
+	int ret;
+
+	memset(levels, 0, level_array_size);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+			return -EINVAL;
+		ret = ci_populate_single_memory_level(rdev,
+						      dpm_table->mclk_table.dpm_levels[i].value,
+						      &pi->smc_state_table.MemoryLevel[i]);
+		if (ret)
+			return ret;
+	}
+
+	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	if ((dpm_table->mclk_table.count >= 2) &&
+	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+		pi->smc_state_table.MemoryLevel[1].MinVddc =
+			pi->smc_state_table.MemoryLevel[0].MinVddc;
+		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+	}
+
+	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+		PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+				   (u8 *)levels, level_array_size,
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void ci_reset_single_dpm_table(struct radeon_device *rdev,
+				      struct ci_single_dpm_table *dpm_table,
+				      u32 count)
+{
+	u32 i;
+
+	dpm_table->count = count;
+	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+		dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
+				      u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+	dpm_table->dpm_levels[index].value = pcie_gen;
+	dpm_table->dpm_levels[index].param1 = pcie_lanes;
+	dpm_table->dpm_levels[index].enabled = true;
+}
+
+static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+		return -EINVAL;
+
+	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+	}
+
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.pcie_speed_table,
+				  SMU7_MAX_LEVELS_LINK);
+
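+	/* fill the six fixed link levels with power-saving and performance gen/lane pairs */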
+	if (rdev->family == CHIP_BONAIRE)
+		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+					  pi->pcie_gen_powersaving.min,
+					  pi->pcie_lane_powersaving.max);
+	else
+		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+					  pi->pcie_gen_powersaving.min,
+					  pi->pcie_lane_powersaving.min);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+				  pi->pcie_gen_performance.min,
+				  pi->pcie_lane_performance.min);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+				  pi->pcie_gen_powersaving.min,
+				  pi->pcie_lane_powersaving.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+				  pi->pcie_gen_performance.min,
+				  pi->pcie_lane_performance.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+				  pi->pcie_gen_powersaving.max,
+				  pi->pcie_lane_powersaving.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+				  pi->pcie_gen_performance.max,
+				  pi->pcie_lane_performance.max);
+
+	pi->dpm_table.pcie_speed_table.count = 6;
+
+	return 0;
+}
+
+static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+	struct radeon_cac_leakage_table *std_voltage_table =
+		&rdev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+
+	if (allowed_sclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_sclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_table->count < 1)
+		return -EINVAL;
+
+	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.sclk_table,
+				  SMU7_MAX_LEVELS_GRAPHICS);
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.mclk_table,
+				  SMU7_MAX_LEVELS_MEMORY);
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.vddc_table,
+				  SMU7_MAX_LEVELS_VDDC);
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.vddci_table,
+				  SMU7_MAX_LEVELS_VDDCI);
+	ci_reset_single_dpm_table(rdev,
+				  &pi->dpm_table.mvdd_table,
+				  SMU7_MAX_LEVELS_MVDD);
+
+	pi->dpm_table.sclk_table.count = 0;
+	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+		if ((i == 0) ||
+		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+		     allowed_sclk_vddc_table->entries[i].clk)) {
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+				allowed_sclk_vddc_table->entries[i].clk;
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+				(i == 0);
+			pi->dpm_table.sclk_table.count++;
+		}
+	}
+
+	pi->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_mclk_table->count; i++) {
+		if ((i == 0) ||
+		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+		     allowed_mclk_table->entries[i].clk)) {
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+				allowed_mclk_table->entries[i].clk;
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+				(i == 0);
+			pi->dpm_table.mclk_table.count++;
+		}
+	}
+
+	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+		pi->dpm_table.vddc_table.dpm_levels[i].value =
+			allowed_sclk_vddc_table->entries[i].v;
+		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+			std_voltage_table->entries[i].leakage;
+		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+	}
+	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+	if (allowed_mclk_table) {
+		for (i = 0; i < allowed_mclk_table->count; i++) {
+			pi->dpm_table.vddci_table.dpm_levels[i].value =
+				allowed_mclk_table->entries[i].v;
+			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+		}
+		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+	}
+
+	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+	if (allowed_mclk_table) {
+		for (i = 0; i < allowed_mclk_table->count; i++) {
+			pi->dpm_table.mvdd_table.dpm_levels[i].value =
+				allowed_mclk_table->entries[i].v;
+			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+		}
+		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+	}
+
+	ci_setup_default_pcie_tables(rdev);
+
+	return 0;
+}
+
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+			      u32 value, u32 *boot_level)
+{
+	u32 i;
+	int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+		if (value == table->dpm_levels[i].value) {
+			*boot_level = i;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int ci_init_smc_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ulv_parm *ulv = &pi->ulv;
+	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+	int ret;
+
+	ret = ci_setup_default_dpm_tables(rdev);
+	if (ret)
+		return ret;
+
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+		ci_populate_smc_voltage_tables(rdev, table);
+
+	ci_init_fps_limits(rdev);
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (ulv->supported) {
+		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
+		if (ret)
+			return ret;
+		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+	}
+
+	ret = ci_populate_all_graphic_levels(rdev);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_all_memory_levels(rdev);
+	if (ret)
+		return ret;
+
+	ci_populate_smc_link_level(rdev, table);
+
+	ret = ci_populate_smc_acpi_level(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_vce_level(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_acp_level(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_samu_level(rdev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_do_program_memory_timing_parameters(rdev);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_uvd_level(rdev, table);
+	if (ret)
+		return ret;
+
+	table->UvdBootLevel = 0;
+	table->VceBootLevel = 0;
+	table->AcpBootLevel = 0;
+	table->SamuBootLevel = 0;
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+				 pi->vbios_boot_state.sclk_bootup_value,
+				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+				 pi->vbios_boot_state.mclk_bootup_value,
+				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+	ci_populate_smc_initial_state(rdev, radeon_boot_state);
+
+	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
+	if (ret)
+		return ret;
+
+	table->UVDInterval = 1;
+	table->VCEInterval = 1;
+	table->ACPInterval = 1;
+	table->SAMUInterval = 1;
+	table->GraphicsVoltageChangeEnable = 1;
+	table->GraphicsThermThrottleEnable = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval = 1;
+	table->ThermalInterval = 1;
+	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->VddcVddciDelta = 4000;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
+	table->PCIeGenInterval = 1;
+	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+		table->SVI2Enable = 1;
+	else
+		table->SVI2Enable = 0;
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
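+	/* the SMC expects multi-byte fields in big-endian order */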
+	table->SystemFlags = cpu_to_be32(table->SystemFlags);
+	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
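+	/* upload everything from SystemFlags up to, but not including, the trailing PID controller blocks */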
+	ret = ci_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+				   (u8 *)&table->SystemFlags,
+				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void ci_trim_single_dpm_states(struct radeon_device *rdev,
+				      struct ci_single_dpm_table *dpm_table,
+				      u32 low_limit, u32 high_limit)
+{
+	u32 i;
+
+	for (i = 0; i < dpm_table->count; i++) {
+		if ((dpm_table->dpm_levels[i].value < low_limit) ||
+		    (dpm_table->dpm_levels[i].value > high_limit))
+			dpm_table->dpm_levels[i].enabled = false;
+		else
+			dpm_table->dpm_levels[i].enabled = true;
+	}
+}
+
+static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
+				    u32 speed_low, u32 lanes_low,
+				    u32 speed_high, u32 lanes_high)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+	u32 i, j;
+
+	for (i = 0; i < pcie_table->count; i++) {
+		if ((pcie_table->dpm_levels[i].value < speed_low) ||
+		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+		    (pcie_table->dpm_levels[i].value > speed_high) ||
+		    (pcie_table->dpm_levels[i].param1 > lanes_high))
+			pcie_table->dpm_levels[i].enabled = false;
+		else
+			pcie_table->dpm_levels[i].enabled = true;
+	}
+
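+	/* disable duplicate gen/lane combinations, keeping the first enabled entry */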
+	for (i = 0; i < pcie_table->count; i++) {
+		if (pcie_table->dpm_levels[i].enabled) {
+			for (j = i + 1; j < pcie_table->count; j++) {
+				if (pcie_table->dpm_levels[j].enabled) {
+					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+						pcie_table->dpm_levels[j].enabled = false;
+				}
+			}
+		}
+	}
+}
+
+static int ci_trim_dpm_states(struct radeon_device *rdev,
+			      struct radeon_ps *radeon_state)
+{
+	struct ci_ps *state = ci_get_ps(radeon_state);
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 high_limit_count;
+
+	if (state->performance_level_count < 1)
+		return -EINVAL;
+
+	if (state->performance_level_count == 1)
+		high_limit_count = 0;
+	else
+		high_limit_count = 1;
+
+	ci_trim_single_dpm_states(rdev,
+				  &pi->dpm_table.sclk_table,
+				  state->performance_levels[0].sclk,
+				  state->performance_levels[high_limit_count].sclk);
+
+	ci_trim_single_dpm_states(rdev,
+				  &pi->dpm_table.mclk_table,
+				  state->performance_levels[0].mclk,
+				  state->performance_levels[high_limit_count].mclk);
+
+	ci_trim_pcie_dpm_states(rdev,
+				state->performance_levels[0].pcie_gen,
+				state->performance_levels[0].pcie_lane,
+				state->performance_levels[high_limit_count].pcie_gen,
+				state->performance_levels[high_limit_count].pcie_lane);
+
+	return 0;
+}
+
+static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
+{
+	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+	struct radeon_clock_voltage_dependency_table *vddc_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 requested_voltage = 0;
+	u32 i;
+
+	if (disp_voltage_table == NULL)
+		return -EINVAL;
+	if (!disp_voltage_table->count)
+		return -EINVAL;
+
+	for (i = 0; i < disp_voltage_table->count; i++) {
+		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+			requested_voltage = disp_voltage_table->entries[i].v;
+	}
+
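+	/* request the smallest vddc that satisfies the current display clock */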
+	for (i = 0; i < vddc_table->count; i++) {
+		if (requested_voltage <= vddc_table->entries[i].v) {
+			requested_voltage = vddc_table->entries[i].v;
+			return (ci_send_msg_to_smc_with_parameter(rdev,
+								  PPSMC_MSG_VddC_Request,
+								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result result;
+
+	ci_apply_disp_minimum_voltage_request(rdev);
+
+	if (!pi->sclk_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			result = ci_send_msg_to_smc_with_parameter(rdev,
+								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	if (!pi->mclk_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			result = ci_send_msg_to_smc_with_parameter(rdev,
+								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+#if 0
+	if (!pi->pcie_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			result = ci_send_msg_to_smc_with_parameter(rdev,
+								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+#endif
+	return 0;
+}
+
+static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
+						   struct radeon_ps *radeon_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ps *state = ci_get_ps(radeon_state);
+	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+	u32 i;
+
+	pi->need_update_smu7_dpm_table = 0;
+
+	for (i = 0; i < sclk_table->count; i++) {
+		if (sclk == sclk_table->dpm_levels[i].value)
+			break;
+	}
+
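+	/* the requested sclk is not in the table, so the top level must be reprogrammed */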
+	if (i >= sclk_table->count) {
+		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	} else {
+		/* XXX The current code always reprogrammed the sclk levels,
+		 * but we don't currently handle disp sclk requirements
+		 * so just skip it.
+		 */
+		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
+			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+	}
+
+	for (i = 0; i < mclk_table->count; i++) {
+		if (mclk == mclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= mclk_table->count)
+		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+	if (rdev->pm.dpm.current_active_crtc_count !=
+	    rdev->pm.dpm.new_active_crtc_count)
+		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
+						       struct radeon_ps *radeon_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ps *state = ci_get_ps(radeon_state);
+	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	int ret;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+		ret = ci_populate_all_graphic_levels(rdev);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+		ret = ci_populate_all_memory_levels(rdev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct radeon_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_uvd_dpm)
+					break;
+			}
+		}
+
+		ci_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_UVDDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
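+		/* keep mclk from dropping to its lowest level while UVD is active */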
+		if (pi->last_mclk_dpm_enable_mask & 0x1) {
+			pi->uvd_enabled = true;
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+			ci_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+		}
+	} else {
+		if (pi->last_mclk_dpm_enable_mask & 0x1) {
+			pi->uvd_enabled = false;
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+			ci_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+		}
+	}
+
+	return (ci_send_msg_to_smc(rdev, enable ?
+				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct radeon_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_vce_dpm)
+					break;
+			}
+		}
+
+		ci_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_VCEDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+	}
+
+	return (ci_send_msg_to_smc(rdev, enable ?
+				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct radeon_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_samu_dpm)
+					break;
+			}
+		}
+
+		ci_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+	}
+	return (ci_send_msg_to_smc(rdev, enable ?
+				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	const struct radeon_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_acp_dpm)
+					break;
+			}
+		}
+
+		ci_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_ACPDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+	}
+
+	return (ci_send_msg_to_smc(rdev, enable ?
+				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (!gate) {
+		if (pi->caps_uvd_dpm ||
+		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+			pi->smc_state_table.UvdBootLevel = 0;
+		else
+			pi->smc_state_table.UvdBootLevel =
+				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+		tmp = RREG32_SMC(DPM_TABLE_475);
+		tmp &= ~UvdBootLevel_MASK;
+		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
+		WREG32_SMC(DPM_TABLE_475, tmp);
+	}
+
+	return ci_enable_uvd_dpm(rdev, !gate);
+}
+
+static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
+{
+	u8 i;
+	u32 min_evclk = 30000; /* ??? */
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].evclk >= min_evclk)
+			return i;
+	}
+
+	return table->count - 1;
+}
+
+static int ci_update_vce_dpm(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_new_state,
+			     struct radeon_ps *radeon_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret = 0;
+	u32 tmp;
+
+	if (radeon_current_state->evclk != radeon_new_state->evclk) {
+		if (radeon_new_state->evclk) {
+			/* turn the clocks on when encoding */
+			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
+
+			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+			tmp = RREG32_SMC(DPM_TABLE_475);
+			tmp &= ~VceBootLevel_MASK;
+			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
+			WREG32_SMC(DPM_TABLE_475, tmp);
+
+			ret = ci_enable_vce_dpm(rdev, true);
+		} else {
+			/* turn the clocks off when not encoding */
+			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+
+			ret = ci_enable_vce_dpm(rdev, false);
+		}
+	}
+	return ret;
+}
+
+#if 0
+static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+	return ci_enable_samu_dpm(rdev, gate);
+}
+
+static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	if (!gate) {
+		pi->smc_state_table.AcpBootLevel = 0;
+
+		tmp = RREG32_SMC(DPM_TABLE_475);
+		tmp &= ~AcpBootLevel_MASK;
+		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
+		WREG32_SMC(DPM_TABLE_475, tmp);
+	}
+
+	return ci_enable_acp_dpm(rdev, !gate);
+}
+#endif
+
+static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
+					     struct radeon_ps *radeon_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+
+	ret = ci_trim_dpm_states(rdev, radeon_state);
+	if (ret)
+		return ret;
+
+	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+	pi->last_mclk_dpm_enable_mask =
+		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+	if (pi->uvd_enabled) {
+		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+	}
+	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+	return 0;
+}
+
+static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
+				       u32 level_mask)
+{
+	u32 level = 0;
+
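+	/* callers must pass a non-zero mask or this loop will never terminate */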
+	while ((level_mask & (1 << level)) == 0)
+		level++;
+
+	return level;
+}
+
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp, levels, i;
+	int ret;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_pcie(rdev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_sclk(rdev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_mclk(rdev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_sclk(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_mclk(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			ret = ci_dpm_force_state_pcie(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+		if (!pi->pcie_dpm_key_disabled) {
+			PPSMC_Result smc_result;
+
+			smc_result = ci_send_msg_to_smc(rdev,
+							PPSMC_MSG_PCIeDPM_UnForceLevel);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+		ret = ci_upload_dpm_level_enable_mask(rdev);
+		if (ret)
+			return ret;
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+static int ci_set_mc_special_registers(struct radeon_device *rdev,
+				       struct ci_mc_reg_table *table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u8 i, j, k;
+	u32 temp_reg;
+
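+	/* append derived command registers (EMRS/MRS/MRS1) after the last vbios entry */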
+	for (i = 0, j = table->last; i < table->last; i++) {
+		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+			return -EINVAL;
+		switch (table->mc_reg_address[i].s1 << 2) {
+		case MC_SEQ_MISC1:
+			temp_reg = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (!pi->mem_gddr5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			if (!pi->mem_gddr5) {
+				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
+				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
+				for (k = 0; k < table->num_entries; k++) {
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				}
+				j++;
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
+			}
+			break;
+		case MC_SEQ_RESERVE_M:
+			temp_reg = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case MC_SEQ_RAS_TIMING >> 2:
+		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_DLL_STBY >> 2:
+		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
+		break;
+	case MC_SEQ_G5PDX_CMD0 >> 2:
+		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
+		break;
+	case MC_SEQ_G5PDX_CMD1 >> 2:
+		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
+		break;
+	case MC_SEQ_G5PDX_CTRL >> 2:
+		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
+		break;
+	case MC_SEQ_CAS_TIMING >> 2:
+		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING2 >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+		break;
+	case MC_SEQ_PMG_DVS_CMD >> 2:
+		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
+		break;
+	case MC_SEQ_PMG_DVS_CTL >> 2:
+		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+		break;
+	case MC_PMG_CMD_EMRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS1 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+		break;
+	case MC_SEQ_PMG_TIMING >> 2:
+		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS2 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_2 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+	u8 i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+			    table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+	}
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+				      struct ci_mc_reg_table *ci_table)
+{
+	u8 i, j;
+
+	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+	ci_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ci_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++)
+			ci_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+	}
+	ci_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+static int ci_register_patching_mc_seq(struct radeon_device *rdev,
+				       struct ci_mc_reg_table *table)
+{
+	u8 i, k;
+	u32 tmp;
+	bool patch;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+	patch = (tmp & 0x0000f00) == 0x300;
+
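+	/* patch MC sequence values for specific mclk strap entries on Hawaii (0x67B0/0x67B1) */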
+	if (patch &&
+	    ((rdev->pdev->device == 0x67B0) ||
+	     (rdev->pdev->device == 0x67B1))) {
+		for (i = 0; i < table->last; i++) {
+			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			switch (table->mc_reg_address[i].s1 << 2) {
+			case MC_SEQ_MISC1:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+							0x00000007;
+				}
+				break;
+			case MC_SEQ_WR_CTL_D0:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+							0x0000D0DD;
+				}
+				break;
+			case MC_SEQ_WR_CTL_D1:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+							0x0000D0DD;
+				}
+				break;
+			case MC_SEQ_WR_CTL_2:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] = 0;
+				}
+				break;
+			case MC_SEQ_CAS_TIMING:
+				for (k = 0; k < table->num_entries; k++) {
+					if (table->mc_reg_table_entry[k].mclk_max == 125000)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+							0x000C0140;
+					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+							0x000C0150;
+				}
+				break;
+			case MC_SEQ_MISC_TIMING:
+				for (k = 0; k < table->num_entries; k++) {
+					if (table->mc_reg_table_entry[k].mclk_max == 125000)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+							0x00000030;
+					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+							0x00000035;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+
+		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
+		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
+	}
+
+	return 0;
+}
+
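+/*
+ * Build the driver's MC register table: mirror the live sequencer
+ * registers into their _LP shadows, read the per-mclk register sets from
+ * the VBIOS, apply board-specific patching and the special-register
+ * fixups, and finally flag the columns that actually vary.
+ */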
+static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct atom_mc_reg_table *table;
+	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(rdev);
+	int ret;
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
+	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
+	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
+	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
+	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
+	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ci_set_s0_mc_reg_index(ci_table);
+
+	ret = ci_register_patching_mc_seq(rdev, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = ci_set_mc_special_registers(rdev, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ci_set_valid_flag(ci_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
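+/*
+ * Pack only the registers flagged in valid_flag into the SMC address
+ * list, byte-swapped to the SMC's big-endian layout.
+ */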
+static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
+					SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+		if (pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (u8)i;
+
+	return 0;
+}
+
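+/*
+ * Copy one entry's register values into an SMC register set, skipping
+ * columns not set in valid_flag.  Despite the name, num_entries is the
+ * number of register columns (table->last), not of table entries.
+ */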
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+				    SMU7_Discrete_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
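+/*
+ * Pick the first table entry whose mclk_max covers the requested memory
+ * clock (falling back to the highest entry) and convert it for the SMC.
+ */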
+static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+						 const u32 memory_clock,
+						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, pi->mc_reg_table.last,
+				pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+					   SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+		ci_convert_mc_reg_table_entry_to_smc(rdev,
+						     pi->dpm_table.mclk_table.dpm_levels[i].value,
+						     &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	int ret;
+
+	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
+	if (ret)
+		return ret;
+	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+	return ci_copy_bytes_to_smc(rdev,
+				    pi->mc_reg_table_start,
+				    (u8 *)&pi->smc_mc_reg_table,
+				    sizeof(SMU7_Discrete_MCRegisters),
+				    pi->sram_end);
+}
+
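+/*
+ * After an overdrive mclk change only the per-level data[] needs to be
+ * re-sent; the address list uploaded at init time is still valid.
+ */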
+static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+	return ci_copy_bytes_to_smc(rdev,
+				    pi->mc_reg_table_start +
+				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
+				    (u8 *)&pi->smc_mc_reg_table.data[0],
+				    sizeof(SMU7_Discrete_MCRegisterSet) *
+				    pi->dpm_table.mclk_table.count,
+				    pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+	tmp |= VOLT_PWRMGT_EN;
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
+						      struct radeon_ps *radeon_state)
+{
+	struct ci_ps *state = ci_get_ps(radeon_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+
+	return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
+{
+	u32 speed_cntl = 0;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
+{
+	u32 link_width = 0;
+
+	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
+	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
+
+	switch (link_width) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X12:
+		/* not actually supported */
+		return 12;
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
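+/*
+ * If the new state wants a faster PCIe link, ask the platform for it via
+ * ACPI PSPP before switching; force_pcie_gen records any cap the platform
+ * imposes.  Downgrades are only flagged here (pspp_notify_required) and
+ * requested after the state change by the _after_ variant below.
+ */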
+static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
+							     struct radeon_ps *radeon_new_state,
+							     struct radeon_ps *radeon_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	enum radeon_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(rdev, radeon_new_state);
+	enum radeon_pcie_gen current_link_speed;
+
+	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
+		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
+	else
+		current_link_speed = pi->force_pcie_gen;
+
+	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+	pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+		case RADEON_PCIE_GEN3:
+			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			pi->force_pcie_gen = RADEON_PCIE_GEN2;
+			if (current_link_speed == RADEON_PCIE_GEN2)
+				break;
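+			/* fall through to try the gen2 request */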
+		case RADEON_PCIE_GEN2:
+			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
+#endif
+		default:
+			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			pi->pspp_notify_required = true;
+	}
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+							   struct radeon_ps *radeon_new_state,
+							   struct radeon_ps *radeon_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	enum radeon_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(rdev, radeon_new_state);
+	u8 request;
+
+	if (pi->pspp_notify_required) {
+		if (target_link_speed == RADEON_PCIE_GEN3)
+			request = PCIE_PERF_REQ_PECI_GEN3;
+		else if (target_link_speed == RADEON_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN1;
+
+		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+		    (ci_get_current_pcie_speed(rdev) > 0))
+			return;
+
+#ifdef CONFIG_ACPI
+		radeon_acpi_pcie_performance_request(rdev, request, false);
+#endif
+	}
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+	if (allowed_sclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_sclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_vddci_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_vddci_table->count < 1)
+		return -EINVAL;
+
+	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+	pi->max_vddc_in_pp_table =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+	pi->max_vddci_in_pp_table =
+		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+	return 0;
+}
+
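+/*
+ * The VBIOS marks leakage entries with magic voltage IDs; swap any such
+ * ID for the real voltage from the leakage table.
+ */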
+static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+	u32 leakage_index;
+
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		if (leakage_table->leakage_id[leakage_index] == *vddc) {
+			*vddc = leakage_table->actual_voltage[leakage_index];
+			break;
+		}
+	}
+}
+
+static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+	u32 leakage_index;
+
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		if (leakage_table->leakage_id[leakage_index] == *vddci) {
+			*vddci = leakage_table->actual_voltage[leakage_index];
+			break;
+		}
+	}
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+								      struct radeon_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
+								       struct radeon_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+									  struct radeon_vce_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+									  struct radeon_uvd_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
+								   struct radeon_phase_shedding_limits_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
+	}
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
+							    struct radeon_clock_and_voltage_limits *table)
+{
+	if (table) {
+		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
+		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
+	}
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
+							 struct radeon_cac_leakage_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
+	}
+}
+
+static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
+{
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
+								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
+							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
+	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
+						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
+}
+
+static void ci_get_memory_type(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 tmp;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+
+	pi->mem_gddr5 = ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+			MC_SEQ_MISC0_GDDR5_VALUE;
+}
+
+static void ci_update_current_ps(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct ci_ps *new_ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	pi->current_rps = *rps;
+	pi->current_ps = *new_ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void ci_update_requested_ps(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
+{
+	struct ci_ps *new_ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(rdev);
+
+	pi->requested_rps = *rps;
+	pi->requested_ps = *new_ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	ci_update_requested_ps(rdev, new_ps);
+
+	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
+
+	return 0;
+}
+
+void ci_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+
+	ci_update_current_ps(rdev, new_ps);
+}
+
+void ci_dpm_setup_asic(struct radeon_device *rdev)
+{
+	int r;
+
+	r = ci_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
+	ci_read_clock_registers(rdev);
+	ci_get_memory_type(rdev);
+	ci_enable_acpi_power_management(rdev);
+	ci_init_sclk_t(rdev);
+}
+
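+/*
+ * Full DPM bring-up: build the voltage and MC register tables, upload
+ * firmware and the SMC state tables, start the SMC, then enable the
+ * individual DPM features.  The SMC must not already be running.
+ */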
+int ci_dpm_enable(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	if (ci_is_smc_running(rdev))
+		return -EINVAL;
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		ci_enable_voltage_control(rdev);
+		ret = ci_construct_voltage_tables(rdev);
+		if (ret) {
+			DRM_ERROR("ci_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_initialize_mc_reg_table(rdev);
+		if (ret)
+			pi->caps_dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		ci_enable_spread_spectrum(rdev, true);
+	if (pi->thermal_protection)
+		ci_enable_thermal_protection(rdev, true);
+	ci_program_sstp(rdev);
+	ci_enable_display_gap(rdev);
+	ci_program_vc(rdev);
+	ret = ci_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("ci_upload_firmware failed\n");
+		return ret;
+	}
+	ret = ci_process_firmware_header(rdev);
+	if (ret) {
+		DRM_ERROR("ci_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
+	if (ret) {
+		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = ci_init_smc_table(rdev);
+	if (ret) {
+		DRM_ERROR("ci_init_smc_table failed\n");
+		return ret;
+	}
+	ret = ci_init_arb_table_index(rdev);
+	if (ret) {
+		DRM_ERROR("ci_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_populate_initial_mc_reg_table(rdev);
+		if (ret) {
+			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ci_populate_pm_base(rdev);
+	if (ret) {
+		DRM_ERROR("ci_populate_pm_base failed\n");
+		return ret;
+	}
+	ci_dpm_start_smc(rdev);
+	ci_enable_vr_hot_gpio_interrupt(rdev);
+	ret = ci_notify_smc_display_change(rdev, false);
+	if (ret) {
+		DRM_ERROR("ci_notify_smc_display_change failed\n");
+		return ret;
+	}
+	ci_enable_sclk_control(rdev, true);
+	ret = ci_enable_ulv(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ulv failed\n");
+		return ret;
+	}
+	ret = ci_enable_ds_master_switch(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ds_master_switch failed\n");
+		return ret;
+	}
+	ret = ci_start_dpm(rdev);
+	if (ret) {
+		DRM_ERROR("ci_start_dpm failed\n");
+		return ret;
+	}
+	ret = ci_enable_didt(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_didt failed\n");
+		return ret;
+	}
+	ret = ci_enable_smc_cac(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = ci_enable_power_containment(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = ci_power_control_set_level(rdev);
+	if (ret) {
+		DRM_ERROR("ci_power_control_set_level failed\n");
+		return ret;
+	}
+
+	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+		return ret;
+	}
+
+	ci_thermal_start_thermal_controller(rdev);
+
+	ci_update_current_ps(rdev, boot_ps);
+
+	return 0;
+}
+
+static int ci_set_temperature_range(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = ci_thermal_enable_alert(rdev, false);
+	if (ret)
+		return ret;
+	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = ci_thermal_enable_alert(rdev, true);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = ci_set_temperature_range(rdev);
+	if (ret)
+		return ret;
+
+	ci_dpm_powergate_uvd(rdev, true);
+
+	return 0;
+}
+
+void ci_dpm_disable(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	ci_dpm_powergate_uvd(rdev, false);
+
+	if (!ci_is_smc_running(rdev))
+		return;
+
+	ci_thermal_stop_thermal_controller(rdev);
+
+	if (pi->thermal_protection)
+		ci_enable_thermal_protection(rdev, false);
+	ci_enable_power_containment(rdev, false);
+	ci_enable_smc_cac(rdev, false);
+	ci_enable_didt(rdev, false);
+	ci_enable_spread_spectrum(rdev, false);
+	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	ci_stop_dpm(rdev);
+	ci_enable_ds_master_switch(rdev, false);
+	ci_enable_ulv(rdev, false);
+	ci_clear_vc(rdev);
+	ci_reset_to_default(rdev);
+	ci_dpm_stop_smc(rdev);
+	ci_force_switch_to_arb_f0(rdev);
+	ci_enable_thermal_based_sclk_dpm(rdev, false);
+
+	ci_update_current_ps(rdev, boot_ps);
+}
+
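+/*
+ * Switch power states: freeze sclk/mclk DPM, upload the new level tables
+ * and enable masks, update VCE/AC-timing/arb parameters, then unfreeze.
+ * PCIe link speed requests bracket the switch when supported.
+ */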
+int ci_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+	struct radeon_ps *old_ps = &pi->current_rps;
+	int ret;
+
+	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
+	if (pi->pcie_performance_request)
+		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+	ret = ci_freeze_sclk_mclk_dpm(rdev);
+	if (ret) {
+		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+		return ret;
+	}
+	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+		return ret;
+	}
+	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+		return ret;
+	}
+
+	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
+	if (ret) {
+		DRM_ERROR("ci_update_vce_dpm failed\n");
+		return ret;
+	}
+
+	ret = ci_update_sclk_t(rdev);
+	if (ret) {
+		DRM_ERROR("ci_update_sclk_t failed\n");
+		return ret;
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_update_and_upload_mc_reg_table(rdev);
+		if (ret) {
+			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ci_program_memory_timing_parameters(rdev);
+	if (ret) {
+		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
+	if (ret) {
+		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+		return ret;
+	}
+	ret = ci_upload_dpm_level_enable_mask(rdev);
+	if (ret) {
+		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+		return ret;
+	}
+	if (pi->pcie_performance_request)
+		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+#if 0
+void ci_dpm_reset_asic(struct radeon_device *rdev)
+{
+	ci_set_boot_state(rdev);
+}
+#endif
+
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	ci_program_display_gap(rdev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					  struct radeon_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
+				      struct radeon_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_pl *pl = &ps->performance_levels[index];
+
+	ps->performance_level_count = index + 1;
+
+	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
+						 pi->sys_pcie_mask,
+						 pi->vbios_boot_state.pcie_gen_bootup_value,
+						 clock_info->ci.ucPCIEGen);
+	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
+						   pi->vbios_boot_state.pcie_lane_bootup_value,
+						   le16_to_cpu(clock_info->ci.usPCIELane));
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+		pi->acpi_pcie_gen = pl->pcie_gen;
+
+	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+		pi->ulv.supported = true;
+		pi->ulv.pl = *pl;
+		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+	}
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+	}
+
+	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		pi->use_pcie_powersaving_levels = true;
+		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+			pi->pcie_gen_powersaving.max = pl->pcie_gen;
+		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+			pi->pcie_gen_powersaving.min = pl->pcie_gen;
+		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+			pi->pcie_lane_powersaving.max = pl->pcie_lane;
+		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+			pi->pcie_lane_powersaving.min = pl->pcie_lane;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		pi->use_pcie_performance_levels = true;
+		if (pi->pcie_gen_performance.max < pl->pcie_gen)
+			pi->pcie_gen_performance.max = pl->pcie_gen;
+		if (pi->pcie_gen_performance.min > pl->pcie_gen)
+			pi->pcie_gen_performance.min = pl->pcie_gen;
+		if (pi->pcie_lane_performance.max < pl->pcie_lane)
+			pi->pcie_lane_performance.max = pl->pcie_lane;
+		if (pi->pcie_lane_performance.min > pl->pcie_lane)
+			pi->pcie_lane_performance.min = pl->pcie_lane;
+		break;
+	default:
+		break;
+	}
+}
+
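+/*
+ * Walk the ATOM PowerPlay state array, building a radeon_ps/ci_ps pair
+ * per state (clamped to CISLANDS_MAX_HARDWARE_POWERLEVELS levels), then
+ * fill in the VCE state clocks from the shared clock info array.
+ */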
+static int ci_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct ci_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info)
+			return -EINVAL;
+		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(rdev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		rdev->pm.dpm.ps[i].ps_priv = ps;
+		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			ci_parse_pplib_clock_info(rdev,
+						  &rdev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
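+/* Read the bootup voltages and clocks from the ATOM firmware info table */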
+static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+				    struct ci_vbios_boot_state *boot_state)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+						    data_offset);
+		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
+		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
+		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+void ci_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	r600_free_extended_power_table(rdev);
+}
+
+int ci_dpm_init(struct radeon_device *rdev)
+{
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	SMU7_Discrete_DpmTable  *dpm_table;
+	struct radeon_gpio_rec gpio;
+	u16 data_offset, size;
+	u8 frev, crev;
+	struct ci_power_info *pi;
+	enum pci_bus_speed speed_cap;
+	struct pci_dev *root = rdev->pdev->bus->self;
+	int ret;
+
+	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	speed_cap = pcie_get_speed_cap(root);
+	if (speed_cap == PCI_SPEED_UNKNOWN) {
+		pi->sys_pcie_mask = 0;
+	} else {
+		if (speed_cap == PCIE_SPEED_8_0GT)
+			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+				RADEON_PCIE_SPEED_50 |
+				RADEON_PCIE_SPEED_80;
+		else if (speed_cap == PCIE_SPEED_5_0GT)
+			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+				RADEON_PCIE_SPEED_50;
+		else
+			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+	}
+	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+
+	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
+	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
+	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
+	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
+
+	pi->pcie_lane_performance.max = 0;
+	pi->pcie_lane_performance.min = 16;
+	pi->pcie_lane_powersaving.max = 0;
+	pi->pcie_lane_powersaving.min = 16;
+
+	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+	if (ret) {
+		ci_dpm_fini(rdev);
+		return ret;
+	}
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret) {
+		ci_dpm_fini(rdev);
+		return ret;
+	}
+
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret) {
+		ci_dpm_fini(rdev);
+		return ret;
+	}
+
+	ret = ci_parse_power_table(rdev);
+	if (ret) {
+		ci_dpm_fini(rdev);
+		return ret;
+	}
+
+	pi->dll_default_on = false;
+	pi->sram_end = SMC_RAM_END;
+
+	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+	pi->sclk_dpm_key_disabled = 0;
+	pi->mclk_dpm_key_disabled = 0;
+	pi->pcie_dpm_key_disabled = 0;
+	pi->thermal_sclk_dpm_enabled = 0;
+
+	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
+	if ((rdev->pdev->device == 0x6658) &&
+	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
+		pi->mclk_dpm_key_disabled = 1;
+	}
+
+	pi->caps_sclk_ds = true;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	pi->mclk_stutter_mode_threshold = 40000;
+	pi->mclk_edc_enable_threshold = 40000;
+	pi->mclk_edc_wr_enable_threshold = 40000;
+
+	ci_initialize_powertune_defaults(rdev);
+
+	pi->caps_fps = false;
+
+	pi->caps_sclk_throttle_low_notification = false;
+
+	pi->caps_uvd_dpm = true;
+	pi->caps_vce_dpm = true;
+
+	ci_get_leakage_voltages(rdev);
+	ci_patch_dependency_tables_with_leakage(rdev);
+	ci_set_private_data_variables_based_on_pptable(rdev);
+
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kcalloc(4,
+			sizeof(struct radeon_clock_voltage_dependency_entry),
+			GFP_KERNEL);
+	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		ci_dpm_fini(rdev);
+		return -ENOMEM;
+	}
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	if (rdev->family == CHIP_HAWAII) {
+		pi->thermal_temp_setting.temperature_low = 94500;
+		pi->thermal_temp_setting.temperature_high = 95000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	} else {
+		pi->thermal_temp_setting.temperature_low = 99500;
+		pi->thermal_temp_setting.temperature_high = 100000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	}
+
+	pi->uvd_enabled = false;
+
+	dpm_table = &pi->smc_state_table;
+
+	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
+	if (gpio.valid) {
+		dpm_table->VRHotGpio = gpio.shift;
+		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+	} else {
+		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+	}
+
+	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
+	if (gpio.valid) {
+		dpm_table->AcDcGpio = gpio.shift;
+		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+	} else {
+		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+	}
+
+	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
+	if (gpio.valid) {
+		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
+
+		switch (gpio.shift) {
+		case 0:
+			tmp &= ~GNB_SLOW_MODE_MASK;
+			tmp |= GNB_SLOW_MODE(1);
+			break;
+		case 1:
+			tmp &= ~GNB_SLOW_MODE_MASK;
+			tmp |= GNB_SLOW_MODE(2);
+			break;
+		case 2:
+			tmp |= GNB_SLOW;
+			break;
+		case 3:
+			tmp |= FORCE_NB_PS1;
+			break;
+		case 4:
+			tmp |= DPM_ENABLED;
+			break;
+		default:
+			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
+			break;
+		}
+		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
+	}
+
+	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+		else
+			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+		else
+			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+	}
+
+	pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+	pi->pcie_performance_request =
+		radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+	pi->pcie_performance_request = false;
+#endif
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		pi->caps_sclk_ss_support = true;
+		pi->caps_mclk_ss_support = true;
+		pi->dynamic_ss = true;
+	} else {
+		pi->caps_sclk_ss_support = false;
+		pi->caps_mclk_ss_support = false;
+		pi->dynamic_ss = true;
+	}
+
+	pi->thermal_protection = rdev->pm.int_thermal_type != THERMAL_TYPE_NONE;
+
+	pi->caps_dynamic_ac_timing = true;
+
+	pi->uvd_power_gated = false;
+
+	/* make sure dc limits are valid */
+	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	pi->fan_ctrl_is_in_default_mode = true;
+
+	return 0;
+}
+
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	u32 sclk = ci_get_average_sclk_freq(rdev);
+	u32 mclk = ci_get_average_mclk_freq(rdev);
+
+	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
+	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
+	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
+		   sclk, mclk);
+}
+
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *rps)
+{
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_pl *pl;
+	int i;
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->performance_level_count; i++) {
+		pl = &ps->performance_levels[i];
+		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	u32 sclk = ci_get_average_sclk_freq(rdev);
+
+	return sclk;
+}
+
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	u32 mclk = ci_get_average_mclk_freq(rdev);
+
+	return mclk;
+}
+
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].sclk;
+
+	return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].mclk;
+
+	return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 0000000..dff2a63
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE  8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
+struct ci_pl {
+	u32 mclk;
+	u32 sclk;
+	enum radeon_pcie_gen pcie_gen;
+	u16 pcie_lane;
+};
+
+struct ci_ps {
+	u16 performance_level_count;
+	bool dc_compatible;
+	u32 sclk_t;
+	struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+	bool enabled;
+	u32 value;
+	u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+	u32 count;
+	struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+	struct ci_single_dpm_table sclk_table;
+	struct ci_single_dpm_table mclk_table;
+	struct ci_single_dpm_table pcie_speed_table;
+	struct ci_single_dpm_table vddc_table;
+	struct ci_single_dpm_table vddci_table;
+	struct ci_single_dpm_table mvdd_table;
+};
+
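+/* driver-side copy of the VBIOS MC register table, trimmed to the SMU7 layout */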
+struct ci_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm {
+	bool supported;
+	u32 cg_ulv_parameter;
+	u32 volt_change_delay;
+	struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT  8
+
+struct ci_leakage_voltage {
+	u16 count;
+	u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+	u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+	u32 uvd_dpm_enable_mask;
+	u32 vce_dpm_enable_mask;
+	u32 acp_dpm_enable_mask;
+	u32 samu_dpm_enable_mask;
+	u32 sclk_dpm_enable_mask;
+	u32 mclk_dpm_enable_mask;
+	u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state {
+	u16 mvdd_bootup_value;
+	u16 vddc_bootup_value;
+	u16 vddci_bootup_value;
+	u32 sclk_bootup_value;
+	u32 mclk_bootup_value;
+	u16 pcie_gen_bootup_value;
+	u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 dll_cntl;
+	u32 mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl_1;
+	u32 mpll_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+	s32 temperature_low;
+	s32 temperature_high;
+	s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+	u16 max;
+	u16 min;
+};
+
+enum ci_pt_config_reg_type {
+	CISLANDS_CONFIGREG_MMR = 0,
+	CISLANDS_CONFIGREG_SMC_IND,
+	CISLANDS_CONFIGREG_DIDT_IND,
+	CISLANDS_CONFIGREG_CACHE,
+	CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
+
+struct ci_pt_config_reg {
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+	u8 svi_load_line_en;
+	u8 svi_load_line_vddc;
+	u8 tdc_vddc_throttle_release_limit_perc;
+	u8 tdc_mawt;
+	u8 tdc_waterfall_ctl;
+	u8 dte_ambient_temp_base;
+	u32 display_cac;
+	u32 bapm_temp_gradient;
+	u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+	u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
+
+struct ci_power_info {
+	struct ci_dpm_table dpm_table;
+	u32 voltage_control;
+	u32 mvdd_control;
+	u32 vddci_control;
+	u32 active_auto_throttle_sources;
+	struct ci_clock_registers clock_registers;
+	u16 acpi_vddc;
+	u16 acpi_vddci;
+	enum radeon_pcie_gen force_pcie_gen;
+	enum radeon_pcie_gen acpi_pcie_gen;
+	struct ci_leakage_voltage vddc_leakage;
+	struct ci_leakage_voltage vddci_leakage;
+	u16 max_vddc_in_pp_table;
+	u16 min_vddc_in_pp_table;
+	u16 max_vddci_in_pp_table;
+	u16 min_vddci_in_pp_table;
+	u32 mclk_strobe_mode_threshold;
+	u32 mclk_stutter_mode_threshold;
+	u32 mclk_edc_enable_threshold;
+	u32 mclk_edc_wr_enable_threshold;
+	struct ci_vbios_boot_state vbios_boot_state;
+	/* smc offsets */
+	u32 sram_end;
+	u32 dpm_table_start;
+	u32 soft_regs_start;
+	u32 mc_reg_table_start;
+	u32 fan_table_start;
+	u32 arb_table_start;
+	/* smc tables */
+	SMU7_Discrete_DpmTable smc_state_table;
+	SMU7_Discrete_MCRegisters smc_mc_reg_table;
+	SMU7_Discrete_PmFuses smc_powertune_table;
+	/* other stuff */
+	struct ci_mc_reg_table mc_reg_table;
+	struct atom_voltage_table vddc_voltage_table;
+	struct atom_voltage_table vddci_voltage_table;
+	struct atom_voltage_table mvdd_voltage_table;
+	struct ci_ulv_parm ulv;
+	u32 power_containment_features;
+	const struct ci_pt_defaults *powertune_defaults;
+	u32 dte_tj_offset;
+	bool vddc_phase_shed_control;
+	struct ci_thermal_temperature_setting thermal_temp_setting;
+	struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+	u32 need_update_smu7_dpm_table;
+	u32 sclk_dpm_key_disabled;
+	u32 mclk_dpm_key_disabled;
+	u32 pcie_dpm_key_disabled;
+	u32 thermal_sclk_dpm_enabled;
+	struct ci_pcie_perf_range pcie_gen_performance;
+	struct ci_pcie_perf_range pcie_lane_performance;
+	struct ci_pcie_perf_range pcie_gen_powersaving;
+	struct ci_pcie_perf_range pcie_lane_powersaving;
+	u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+	u32 mclk_activity_target;
+	u32 low_sclk_interrupt_t;
+	u32 last_mclk_dpm_enable_mask;
+	u32 sys_pcie_mask;
+	/* caps */
+	bool caps_power_containment;
+	bool caps_cac;
+	bool caps_sq_ramping;
+	bool caps_db_ramping;
+	bool caps_td_ramping;
+	bool caps_tcp_ramping;
+	bool caps_fps;
+	bool caps_sclk_ds;
+	bool caps_sclk_ss_support;
+	bool caps_mclk_ss_support;
+	bool caps_uvd_dpm;
+	bool caps_vce_dpm;
+	bool caps_samu_dpm;
+	bool caps_acp_dpm;
+	bool caps_automatic_dc_transition;
+	bool caps_sclk_throttle_low_notification;
+	bool caps_dynamic_ac_timing;
+	bool caps_od_fuzzy_fan_control_support;
+	/* flags */
+	bool thermal_protection;
+	bool pcie_performance_request;
+	bool dynamic_ss;
+	bool dll_default_on;
+	bool cac_enabled;
+	bool uvd_enabled;
+	bool battery_state;
+	bool pspp_notify_required;
+	bool mem_gddr5;
+	bool enable_bapm_feature;
+	bool enable_tdc_limit_feature;
+	bool enable_pkg_pwr_tracking_feature;
+	bool use_pcie_performance_levels;
+	bool use_pcie_powersaving_levels;
+	bool uvd_power_gated;
+	/* driver states */
+	struct radeon_ps current_rps;
+	struct ci_ps current_ps;
+	struct radeon_ps requested_rps;
+	struct ci_ps requested_ps;
+	/* fan control */
+	bool fan_ctrl_is_in_default_mode;
+	bool fan_is_controlled_by_smc;
+	u32 t_min;
+	u32 fan_ctrl_default_mode;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE                   0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2               0x2
+
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT             256
+
+#define CISLANDS_VRC_DFLT0                              0x3FFFC000
+#define CISLANDS_VRC_DFLT1                              0x000400
+#define CISLANDS_VRC_DFLT2                              0xC00080
+#define CISLANDS_VRC_DFLT3                              0xC00200
+#define CISLANDS_VRC_DFLT4                              0xC01680
+#define CISLANDS_VRC_DFLT5                              0xC00033
+#define CISLANDS_VRC_DFLT6                              0xC00033
+#define CISLANDS_VRC_DFLT7                              0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT                     30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT                10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY   0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER    1
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit);
+void ci_start_smc(struct radeon_device *rdev);
+void ci_reset_smc(struct radeon_device *rdev);
+int ci_program_jump_on_start(struct radeon_device *rdev);
+void ci_stop_smc_clock(struct radeon_device *rdev);
+void ci_start_smc_clock(struct radeon_device *rdev);
+bool ci_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+			   u32 smc_address, u32 *value, u32 limit);
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+			    u32 smc_address, u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 0000000..3711219
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "radeon_ucode.h"
+#include "ci_dpm.h"
+
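+/* point the indirect SMC SRAM window at a dword-aligned address, auto-increment off */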
+static int ci_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(SMC_IND_INDEX_0, smc_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+	return 0;
+}
+
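+/*
+ * Copy a buffer into SMC SRAM.  The SMC is big-endian and dword
+ * addressed: whole dwords are byte-swapped in directly, and a trailing
+ * partial dword is merged with the existing contents read-modify-write.
+ */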
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit)
+{
+	unsigned long flags;
+	u32 data, original_data;
+	u32 addr;
+	u32 extra_shift;
+	int ret = 0;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = ci_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/*
+	 * The SMC only takes aligned 32-bit accesses, so handle any
+	 * trailing bytes with a read-modify-write: the remaining bytes
+	 * land in the high-order (big-endian) end of the dword and the
+	 * untouched low-order bytes are preserved.
+	 */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = ci_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = ci_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
+
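+/* take the SMC microcontroller out of reset so it starts executing */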
+void ci_start_smc(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+	tmp &= ~RST_REG;
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
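+/* hold the SMC microcontroller in reset */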
+void ci_reset_smc(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+	tmp |= RST_REG;
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
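+/*
+ * Write a 4-byte bootstrap sequence at SMC address 0; this appears to
+ * be a jump to the firmware entry point, so that releasing reset runs
+ * the freshly loaded ucode.
+ */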
+int ci_program_jump_on_start(struct radeon_device *rdev)
+{
+	static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void ci_stop_smc_clock(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp |= CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void ci_start_smc_clock(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp &= ~CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool ci_is_smc_running(struct radeon_device *rdev)
+{
+	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+	u32 pc_c = RREG32_SMC(SMC_PC_C);
+
+	if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
+		return true;
+
+	return false;
+}
+
+#if 0
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	if (!ci_is_smc_running(rdev))
+		return PPSMC_Result_OK;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & CKEN) == 0)
+			break;
+		udelay(1);
+	}
+
+	return PPSMC_Result_OK;
+}
+#endif
+
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
+{
+	unsigned long flags;
+	u32 ucode_start_address;
+	u32 ucode_size;
+	const u8 *src;
+	u32 data;
+
+	if (!rdev->smc_fw)
+		return -EINVAL;
+
+	if (rdev->new_fw) {
+		const struct smc_firmware_header_v1_0 *hdr =
+			(const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+		radeon_ucode_print_smc_hdr(&hdr->header);
+
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		src = (const u8 *)
+			(rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		switch (rdev->family) {
+		case CHIP_BONAIRE:
+			ucode_start_address = BONAIRE_SMC_UCODE_START;
+			ucode_size = BONAIRE_SMC_UCODE_SIZE;
+			break;
+		case CHIP_HAWAII:
+			ucode_start_address = HAWAII_SMC_UCODE_START;
+			ucode_size = HAWAII_SMC_UCODE_SIZE;
+			break;
+		default:
+			DRM_ERROR("unknown asic in smc ucode loader\n");
+			BUG();
+		}
+
+		src = (const u8 *)rdev->smc_fw->data;
+	}
+
+	if (ucode_size & 3)
+		return -EINVAL;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
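+	/*
+	 * Enable auto-increment on the index register so the entire
+	 * image can be streamed through SMC_IND_DATA_0 without
+	 * reprogramming the index for every dword.
+	 */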
+	WREG32(SMC_IND_INDEX_0, ucode_start_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+	while (ucode_size >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		ucode_size -= 4;
+	}
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+			   u32 smc_address, u32 *value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+			    u32 smc_address, u32 value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
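+
+/*
+ * Typical usage sketch (tbl_addr and SOME_FLAG are hypothetical):
+ * patch a single firmware table dword in place instead of
+ * re-uploading the whole image:
+ *
+ *	u32 v;
+ *	if (!ci_read_smc_sram_dword(rdev, tbl_addr, &v, limit)) {
+ *		v |= SOME_FLAG;
+ *		ci_write_smc_sram_dword(rdev, tbl_addr, v, limit);
+ *	}
+ */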
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
new file mode 100644
index 0000000..ebce460
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -0,0 +1,9808 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "cikd.h"
+#include "atom.h"
+#include "cik_blit_shaders.h"
+#include "radeon_ucode.h"
+#include "clearstate_ci.h"
+
+#define SH_MEM_CONFIG_GFX_DEFAULT \
+	ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+
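+/*
+ * Both the legacy (UPPERCASE) and the newer unified (lowercase)
+ * firmware names are advertised; which set actually gets used is
+ * decided at load time via rdev->new_fw.
+ */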
+MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
+
+MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
+MODULE_FIRMWARE("radeon/HAWAII_me.bin");
+MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
+MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
+MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+
+MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
+MODULE_FIRMWARE("radeon/KAVERI_me.bin");
+MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
+MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
+MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
+MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+
+MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
+MODULE_FIRMWARE("radeon/KABINI_me.bin");
+MODULE_FIRMWARE("radeon/KABINI_ce.bin");
+MODULE_FIRMWARE("radeon/KABINI_mec.bin");
+MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
+MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+
+MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
+MODULE_FIRMWARE("radeon/MULLINS_me.bin");
+MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
+MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
+MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
+MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
+
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+
+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern void si_rlc_reset(struct radeon_device *rdev);
+extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
+extern int cik_sdma_resume(struct radeon_device *rdev);
+extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
+extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
+static void cik_rlc_stop(struct radeon_device *rdev);
+static void cik_pcie_gen3_enable(struct radeon_device *rdev);
+static void cik_program_aspm(struct radeon_device *rdev);
+static void cik_init_pg(struct radeon_device *rdev);
+static void cik_init_cg(struct radeon_device *rdev);
+static void cik_fini_pg(struct radeon_device *rdev);
+static void cik_fini_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					  bool enable);
+
+/**
+ * cik_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+				  u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS2:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case GRBM_STATUS_SE2:
+	case GRBM_STATUS_SE3:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET):
+	case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET):
+	case UVD_STATUS:
+	/* TODO VCE */
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Indirect DIDT register accessors
+ */
+u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
+	WREG32(CIK_DIDT_IND_INDEX, (reg));
+	r = RREG32(CIK_DIDT_IND_DATA);
+	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
+	return r;
+}
+
+void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
+	WREG32(CIK_DIDT_IND_INDEX, (reg));
+	WREG32(CIK_DIDT_IND_DATA, (v));
+	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
+}
+
+/* get temperature in millidegrees */
+int ci_get_temp(struct radeon_device *rdev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
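+	/*
+	 * The status field is 10 bits; bit 9 set seems to flag a
+	 * saturated sensor, so clamp the reading at 255 degrees C.
+	 */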
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = actual_temp * 1000;
+
+	return actual_temp;
+}
+
+/* get temperature in millidegrees */
+int kv_get_temp(struct radeon_device *rdev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = RREG32_SMC(0xC0300E0C);
+
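+	/* the raw value is in 1/8 degree C steps with a 49 degree C offset */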
+	if (temp)
+		actual_temp = (temp / 8) - 49;
+	else
+		actual_temp = 0;
+
+	actual_temp = actual_temp * 1000;
+
+	return actual_temp;
+}
+
+/*
+ * Indirect PCIE port register accessors
+ */
+u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
+	WREG32(PCIE_INDEX, reg);
+	(void)RREG32(PCIE_INDEX);
+	r = RREG32(PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
+	return r;
+}
+
+void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
+	WREG32(PCIE_INDEX, reg);
+	(void)RREG32(PCIE_INDEX);
+	WREG32(PCIE_DATA, v);
+	(void)RREG32(PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
+}
+
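+/*
+ * RLC save/restore register lists.  Each entry pair appears to encode
+ * an instance selector in the upper 16 bits (broadcast/per-SE values
+ * such as 0x0e00, 0x4e00, ...) plus the register's dword offset in the
+ * lower 16 bits, followed by a default value; the bare 0x3 and 0x5
+ * words look like section markers interpreted by the RLC ucode.
+ */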
+static const u32 spectre_rlc_save_restore_register_list[] =
+{
+	(0x0e00 << 16) | (0xc12c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc140 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc150 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc15c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc168 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc170 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc178 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc204 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2b8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2bc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2c0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8228 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x829c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x869c >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x98f8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9900 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc260 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c000 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c00c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9700 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8900 >> 2),
+	0x00000000,
+	0x3,
+	(0x0e00 << 16) | (0xc130 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc134 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc1fc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc208 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc264 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc268 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc26c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc270 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc274 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc278 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc27c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc280 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc284 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc288 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc28c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc290 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc294 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc298 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc29c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2ac  >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2b0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x301d0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30238 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30250 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30254 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30258 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3025c >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc99c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9834 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f00 >> 2),
+	0x00000000,
+	(0x0001 << 16) | (0x30f00 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f04 >> 2),
+	0x00000000,
+	(0x0001 << 16) | (0x30f04 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f08 >> 2),
+	0x00000000,
+	(0x0001 << 16) | (0x30f08 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f0c >> 2),
+	0x00000000,
+	(0x0001 << 16) | (0x30f0c >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8a14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8a18 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8bf0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8bcc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8b24 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30a04 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a10 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a14 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a18 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a2c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc700 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc704 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc708 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc768 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc770 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc774 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc778 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc77c >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc780 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc784 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc788 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc78c >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc798 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc79c >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7a0 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7a4 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7a8 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7ac >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7b0 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc7b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9100 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c010 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92a8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92ac >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92b8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92bc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92c0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92c4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92c8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92cc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x92d0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c04 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c38 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c3c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xae00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9604 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac08 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac0c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac10 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac58 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac68 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac6c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac70 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac74 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac78 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac7c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac80 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac84 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac88 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac8c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x970c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9714 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9718 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x971c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x8e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x9e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0xae00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0xbe00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd10 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88bc >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88c4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88c8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8980 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30938 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3093c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30940 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89a0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30900 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30904 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c210 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c214 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c218 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8904 >> 2),
+	0x00000000,
+	0x5,
+	(0x0e00 << 16) | (0x8c28 >> 2),
+	(0x0e00 << 16) | (0x8c2c >> 2),
+	(0x0e00 << 16) | (0x8c30 >> 2),
+	(0x0e00 << 16) | (0x8c34 >> 2),
+	(0x0e00 << 16) | (0x9600 >> 2),
+};
+
+static const u32 kalindi_rlc_save_restore_register_list[] =
+{
+	(0x0e00 << 16) | (0xc12c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc140 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc150 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc15c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc168 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc170 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc204 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2b8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2bc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2c0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8228 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x829c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x869c >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x98f8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9900 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc260 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c000 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c00c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9700 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xcd20 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8900 >> 2),
+	0x00000000,
+	0x3,
+	(0x0e00 << 16) | (0xc130 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc134 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc1fc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc208 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc264 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc268 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc26c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc270 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc274 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc28c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc290 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc294 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc298 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2a8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc2ac >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x301d0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30238 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30250 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30254 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30258 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3025c >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc900 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc904 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc908 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc90c >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0xc910 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc99c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9834 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f00 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f04 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f08 >> 2),
+	0x00000000,
+	(0x0000 << 16) | (0x30f0c >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8a14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8a18 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8bf0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8bcc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8b24 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30a04 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a10 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a14 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a18 >> 2),
+	0x00000000,
+	(0x0600 << 16) | (0x30a2c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc700 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc704 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc708 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xc768 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc770 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc774 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc798 >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0xc79c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9100 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c010 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c04 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c38 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8c3c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xae00 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9604 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac08 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac0c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac10 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac58 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac68 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac6c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac70 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac74 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac78 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac7c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac80 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac84 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac88 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xac8c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x970c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9714 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x9718 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x971c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x4e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x5e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x6e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x7e00 << 16) | (0x31068 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd10 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0xcd14 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88b8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88bc >> 2),
+	0x00000000,
+	(0x0400 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88c4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88c8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x88d8 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8980 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30938 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3093c >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30940 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89a0 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30900 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x30904 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x89b4 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3e1fc >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c210 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c214 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x3c218 >> 2),
+	0x00000000,
+	(0x0e00 << 16) | (0x8904 >> 2),
+	0x00000000,
+	0x5,
+	(0x0e00 << 16) | (0x8c28 >> 2),
+	(0x0e00 << 16) | (0x8c2c >> 2),
+	(0x0e00 << 16) | (0x8c30 >> 2),
+	(0x0e00 << 16) | (0x8c34 >> 2),
+	(0x0e00 << 16) | (0x9600 >> 2),
+};
+
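+/*
+ * "Golden" register tables are {offset, and_mask, or_mask} triplets
+ * consumed by radeon_program_register_sequence(): the and_mask bits of
+ * the current value are cleared and or_mask is OR'd in; an and_mask of
+ * 0xffffffff writes or_mask directly.
+ */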
+static const u32 bonaire_golden_spm_registers[] =
+{
+	0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 bonaire_golden_common_registers[] =
+{
+	0xc770, 0xffffffff, 0x00000800,
+	0xc774, 0xffffffff, 0x00000800,
+	0xc798, 0xffffffff, 0x00007fbf,
+	0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 bonaire_golden_registers[] =
+{
+	0x3354, 0x00000333, 0x00000333,
+	0x3350, 0x000c0fc0, 0x00040200,
+	0x9a10, 0x00010000, 0x00058208,
+	0x3c000, 0xffff1fff, 0x00140000,
+	0x3c200, 0xfdfc0fff, 0x00000100,
+	0x3c234, 0x40000000, 0x40000200,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x98f8, 0x73773777, 0x12010001,
+	0x350c, 0x00810000, 0x408af000,
+	0x7030, 0x31000111, 0x00000011,
+	0x2f48, 0x73773777, 0x12010001,
+	0x220c, 0x00007fb6, 0x0021a1b1,
+	0x2210, 0x00007fb6, 0x002021b1,
+	0x2180, 0x00007fb6, 0x00002191,
+	0x2218, 0x00007fb6, 0x002121b1,
+	0x221c, 0x00007fb6, 0x002021b1,
+	0x21dc, 0x00007fb6, 0x00002191,
+	0x21e0, 0x00007fb6, 0x00002191,
+	0x3628, 0x0000003f, 0x0000000a,
+	0x362c, 0x0000003f, 0x0000000a,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8bf0, 0x00002001, 0x00000001,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0x3e78, 0x00000001, 0x00000002,
+	0x9100, 0x03000000, 0x0362c688,
+	0x8c00, 0x000000ff, 0x00000001,
+	0xe40, 0x00001fff, 0x00001fff,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f3,
+	0xac0c, 0xffffffff, 0x00001032
+};
+
+static const u32 bonaire_mgcg_cgcg_init[] =
+{
+	0xc420, 0xffffffff, 0xfffffffc,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c2a0, 0xffffffff, 0x00000100,
+	0x3c208, 0xffffffff, 0x00000100,
+	0x3c2c0, 0xffffffff, 0xc0000100,
+	0x3c2c8, 0xffffffff, 0xc0000100,
+	0x3c2c4, 0xffffffff, 0xc0000100,
+	0x55e4, 0xffffffff, 0x00600100,
+	0x3c280, 0xffffffff, 0x00000100,
+	0x3c214, 0xffffffff, 0x06000100,
+	0x3c220, 0xffffffff, 0x00000100,
+	0x3c218, 0xffffffff, 0x06000100,
+	0x3c204, 0xffffffff, 0x00000100,
+	0x3c2e0, 0xffffffff, 0x00000100,
+	0x3c224, 0xffffffff, 0x00000100,
+	0x3c200, 0xffffffff, 0x00000100,
+	0x3c230, 0xffffffff, 0x00000100,
+	0x3c234, 0xffffffff, 0x00000100,
+	0x3c250, 0xffffffff, 0x00000100,
+	0x3c254, 0xffffffff, 0x00000100,
+	0x3c258, 0xffffffff, 0x00000100,
+	0x3c25c, 0xffffffff, 0x00000100,
+	0x3c260, 0xffffffff, 0x00000100,
+	0x3c27c, 0xffffffff, 0x00000100,
+	0x3c278, 0xffffffff, 0x00000100,
+	0x3c210, 0xffffffff, 0x06000100,
+	0x3c290, 0xffffffff, 0x00000100,
+	0x3c274, 0xffffffff, 0x00000100,
+	0x3c2b4, 0xffffffff, 0x00000100,
+	0x3c2b0, 0xffffffff, 0x00000100,
+	0x3c270, 0xffffffff, 0x00000100,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c020, 0xffffffff, 0x00010000,
+	0x3c024, 0xffffffff, 0x00030002,
+	0x3c028, 0xffffffff, 0x00040007,
+	0x3c02c, 0xffffffff, 0x00060005,
+	0x3c030, 0xffffffff, 0x00090008,
+	0x3c034, 0xffffffff, 0x00010000,
+	0x3c038, 0xffffffff, 0x00030002,
+	0x3c03c, 0xffffffff, 0x00040007,
+	0x3c040, 0xffffffff, 0x00060005,
+	0x3c044, 0xffffffff, 0x00090008,
+	0x3c048, 0xffffffff, 0x00010000,
+	0x3c04c, 0xffffffff, 0x00030002,
+	0x3c050, 0xffffffff, 0x00040007,
+	0x3c054, 0xffffffff, 0x00060005,
+	0x3c058, 0xffffffff, 0x00090008,
+	0x3c05c, 0xffffffff, 0x00010000,
+	0x3c060, 0xffffffff, 0x00030002,
+	0x3c064, 0xffffffff, 0x00040007,
+	0x3c068, 0xffffffff, 0x00060005,
+	0x3c06c, 0xffffffff, 0x00090008,
+	0x3c070, 0xffffffff, 0x00010000,
+	0x3c074, 0xffffffff, 0x00030002,
+	0x3c078, 0xffffffff, 0x00040007,
+	0x3c07c, 0xffffffff, 0x00060005,
+	0x3c080, 0xffffffff, 0x00090008,
+	0x3c084, 0xffffffff, 0x00010000,
+	0x3c088, 0xffffffff, 0x00030002,
+	0x3c08c, 0xffffffff, 0x00040007,
+	0x3c090, 0xffffffff, 0x00060005,
+	0x3c094, 0xffffffff, 0x00090008,
+	0x3c098, 0xffffffff, 0x00010000,
+	0x3c09c, 0xffffffff, 0x00030002,
+	0x3c0a0, 0xffffffff, 0x00040007,
+	0x3c0a4, 0xffffffff, 0x00060005,
+	0x3c0a8, 0xffffffff, 0x00090008,
+	0x3c000, 0xffffffff, 0x96e00200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc424, 0xffffffff, 0x0020003f,
+	0x38, 0xffffffff, 0x0140001c,
+	0x3c, 0x000f0000, 0x000f0000,
+	0x220, 0xffffffff, 0xC060000C,
+	0x224, 0xc0000fff, 0x00000100,
+	0xf90, 0xffffffff, 0x00000100,
+	0xf98, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd00c, 0xff000ff0, 0x00000100,
+	0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 spectre_golden_spm_registers[] =
+{
+	0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 spectre_golden_common_registers[] =
+{
+	0xc770, 0xffffffff, 0x00000800,
+	0xc774, 0xffffffff, 0x00000800,
+	0xc798, 0xffffffff, 0x00007fbf,
+	0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 spectre_golden_registers[] =
+{
+	0x3c000, 0xffff1fff, 0x96940200,
+	0x3c00c, 0xffff0001, 0xff000000,
+	0x3c200, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0xfffffffc, 0x00020200,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x98f8, 0x73773777, 0x12010001,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x2f48, 0x73773777, 0x12010001,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x28350, 0x3f3f3fff, 0x00000082,
+	0x28354, 0x0000003f, 0x00000000,
+	0x3e78, 0x00000001, 0x00000002,
+	0x913c, 0xffff03df, 0x00000004,
+	0xc768, 0x00000008, 0x00000008,
+	0x8c00, 0x000008ff, 0x00000800,
+	0x9508, 0x00010000, 0x00010000,
+	0xac0c, 0xffffffff, 0x54763210,
+	0x214f8, 0x01ff01ff, 0x00000002,
+	0x21498, 0x007ff800, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x30934, 0xffffffff, 0x00000001
+};
+
+static const u32 spectre_mgcg_cgcg_init[] =
+{
+	0xc420, 0xffffffff, 0xfffffffc,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c2a0, 0xffffffff, 0x00000100,
+	0x3c208, 0xffffffff, 0x00000100,
+	0x3c2c0, 0xffffffff, 0x00000100,
+	0x3c2c8, 0xffffffff, 0x00000100,
+	0x3c2c4, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00600100,
+	0x3c280, 0xffffffff, 0x00000100,
+	0x3c214, 0xffffffff, 0x06000100,
+	0x3c220, 0xffffffff, 0x00000100,
+	0x3c218, 0xffffffff, 0x06000100,
+	0x3c204, 0xffffffff, 0x00000100,
+	0x3c2e0, 0xffffffff, 0x00000100,
+	0x3c224, 0xffffffff, 0x00000100,
+	0x3c200, 0xffffffff, 0x00000100,
+	0x3c230, 0xffffffff, 0x00000100,
+	0x3c234, 0xffffffff, 0x00000100,
+	0x3c250, 0xffffffff, 0x00000100,
+	0x3c254, 0xffffffff, 0x00000100,
+	0x3c258, 0xffffffff, 0x00000100,
+	0x3c25c, 0xffffffff, 0x00000100,
+	0x3c260, 0xffffffff, 0x00000100,
+	0x3c27c, 0xffffffff, 0x00000100,
+	0x3c278, 0xffffffff, 0x00000100,
+	0x3c210, 0xffffffff, 0x06000100,
+	0x3c290, 0xffffffff, 0x00000100,
+	0x3c274, 0xffffffff, 0x00000100,
+	0x3c2b4, 0xffffffff, 0x00000100,
+	0x3c2b0, 0xffffffff, 0x00000100,
+	0x3c270, 0xffffffff, 0x00000100,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c020, 0xffffffff, 0x00010000,
+	0x3c024, 0xffffffff, 0x00030002,
+	0x3c028, 0xffffffff, 0x00040007,
+	0x3c02c, 0xffffffff, 0x00060005,
+	0x3c030, 0xffffffff, 0x00090008,
+	0x3c034, 0xffffffff, 0x00010000,
+	0x3c038, 0xffffffff, 0x00030002,
+	0x3c03c, 0xffffffff, 0x00040007,
+	0x3c040, 0xffffffff, 0x00060005,
+	0x3c044, 0xffffffff, 0x00090008,
+	0x3c048, 0xffffffff, 0x00010000,
+	0x3c04c, 0xffffffff, 0x00030002,
+	0x3c050, 0xffffffff, 0x00040007,
+	0x3c054, 0xffffffff, 0x00060005,
+	0x3c058, 0xffffffff, 0x00090008,
+	0x3c05c, 0xffffffff, 0x00010000,
+	0x3c060, 0xffffffff, 0x00030002,
+	0x3c064, 0xffffffff, 0x00040007,
+	0x3c068, 0xffffffff, 0x00060005,
+	0x3c06c, 0xffffffff, 0x00090008,
+	0x3c070, 0xffffffff, 0x00010000,
+	0x3c074, 0xffffffff, 0x00030002,
+	0x3c078, 0xffffffff, 0x00040007,
+	0x3c07c, 0xffffffff, 0x00060005,
+	0x3c080, 0xffffffff, 0x00090008,
+	0x3c084, 0xffffffff, 0x00010000,
+	0x3c088, 0xffffffff, 0x00030002,
+	0x3c08c, 0xffffffff, 0x00040007,
+	0x3c090, 0xffffffff, 0x00060005,
+	0x3c094, 0xffffffff, 0x00090008,
+	0x3c098, 0xffffffff, 0x00010000,
+	0x3c09c, 0xffffffff, 0x00030002,
+	0x3c0a0, 0xffffffff, 0x00040007,
+	0x3c0a4, 0xffffffff, 0x00060005,
+	0x3c0a8, 0xffffffff, 0x00090008,
+	0x3c0ac, 0xffffffff, 0x00010000,
+	0x3c0b0, 0xffffffff, 0x00030002,
+	0x3c0b4, 0xffffffff, 0x00040007,
+	0x3c0b8, 0xffffffff, 0x00060005,
+	0x3c0bc, 0xffffffff, 0x00090008,
+	0x3c000, 0xffffffff, 0x96e00200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc424, 0xffffffff, 0x0020003f,
+	0x38, 0xffffffff, 0x0140001c,
+	0x3c, 0x000f0000, 0x000f0000,
+	0x220, 0xffffffff, 0xC060000C,
+	0x224, 0xc0000fff, 0x00000100,
+	0xf90, 0xffffffff, 0x00000100,
+	0xf98, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd00c, 0xff000ff0, 0x00000100,
+	0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 kalindi_golden_spm_registers[] =
+{
+	0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 kalindi_golden_common_registers[] =
+{
+	0xc770, 0xffffffff, 0x00000800,
+	0xc774, 0xffffffff, 0x00000800,
+	0xc798, 0xffffffff, 0x00007fbf,
+	0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 kalindi_golden_registers[] =
+{
+	0x3c000, 0xffffdfff, 0x6e944040,
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x3c220, 0xff000fff, 0x00000100,
+	0x3c224, 0xff000fff, 0x00000100,
+	0x3c200, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x000fffff, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ffcfff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0x3e78, 0x00000001, 0x00000002,
+	0xc768, 0x00000008, 0x00000008,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x214f8, 0x01ff01ff, 0x00000002,
+	0x21498, 0x007ff800, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x30934, 0xffffffff, 0x00000000
+};
+
+static const u32 kalindi_mgcg_cgcg_init[] =
+{
+	0xc420, 0xffffffff, 0xfffffffc,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c2a0, 0xffffffff, 0x00000100,
+	0x3c208, 0xffffffff, 0x00000100,
+	0x3c2c0, 0xffffffff, 0x00000100,
+	0x3c2c8, 0xffffffff, 0x00000100,
+	0x3c2c4, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00600100,
+	0x3c280, 0xffffffff, 0x00000100,
+	0x3c214, 0xffffffff, 0x06000100,
+	0x3c220, 0xffffffff, 0x00000100,
+	0x3c218, 0xffffffff, 0x06000100,
+	0x3c204, 0xffffffff, 0x00000100,
+	0x3c2e0, 0xffffffff, 0x00000100,
+	0x3c224, 0xffffffff, 0x00000100,
+	0x3c200, 0xffffffff, 0x00000100,
+	0x3c230, 0xffffffff, 0x00000100,
+	0x3c234, 0xffffffff, 0x00000100,
+	0x3c250, 0xffffffff, 0x00000100,
+	0x3c254, 0xffffffff, 0x00000100,
+	0x3c258, 0xffffffff, 0x00000100,
+	0x3c25c, 0xffffffff, 0x00000100,
+	0x3c260, 0xffffffff, 0x00000100,
+	0x3c27c, 0xffffffff, 0x00000100,
+	0x3c278, 0xffffffff, 0x00000100,
+	0x3c210, 0xffffffff, 0x06000100,
+	0x3c290, 0xffffffff, 0x00000100,
+	0x3c274, 0xffffffff, 0x00000100,
+	0x3c2b4, 0xffffffff, 0x00000100,
+	0x3c2b0, 0xffffffff, 0x00000100,
+	0x3c270, 0xffffffff, 0x00000100,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c020, 0xffffffff, 0x00010000,
+	0x3c024, 0xffffffff, 0x00030002,
+	0x3c028, 0xffffffff, 0x00040007,
+	0x3c02c, 0xffffffff, 0x00060005,
+	0x3c030, 0xffffffff, 0x00090008,
+	0x3c034, 0xffffffff, 0x00010000,
+	0x3c038, 0xffffffff, 0x00030002,
+	0x3c03c, 0xffffffff, 0x00040007,
+	0x3c040, 0xffffffff, 0x00060005,
+	0x3c044, 0xffffffff, 0x00090008,
+	0x3c000, 0xffffffff, 0x96e00200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc424, 0xffffffff, 0x0020003f,
+	0x38, 0xffffffff, 0x0140001c,
+	0x3c, 0x000f0000, 0x000f0000,
+	0x220, 0xffffffff, 0xC060000C,
+	0x224, 0xc0000fff, 0x00000100,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd00c, 0xff000ff0, 0x00000100,
+	0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 hawaii_golden_spm_registers[] =
+{
+	0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+	0x30800, 0xffffffff, 0xe0000000,
+	0x28350, 0xffffffff, 0x3a00161a,
+	0x28354, 0xffffffff, 0x0000002e,
+	0x9a10, 0xffffffff, 0x00018208,
+	0x98f8, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+	0x3354, 0x00000333, 0x00000333,
+	0x9a10, 0x00010000, 0x00058208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x350c, 0x00810000, 0x408af000,
+	0x7030, 0x31000111, 0x00000011,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2120, 0x0000007f, 0x0000001b,
+	0x21dc, 0x00007fb6, 0x00002191,
+	0x3628, 0x0000003f, 0x0000000a,
+	0x362c, 0x0000003f, 0x0000000a,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8bf0, 0x00002001, 0x00000001,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x3e78, 0x00000001, 0x00000002,
+	0xc768, 0x00000008, 0x00000008,
+	0xc770, 0x00000f00, 0x00000800,
+	0xc774, 0x00000f00, 0x00000800,
+	0xc798, 0x00ffffff, 0x00ff7fbf,
+	0xc79c, 0x00ffffff, 0x00ff7faf,
+	0x8c00, 0x000000ff, 0x00000800,
+	0xe40, 0x00001fff, 0x00001fff,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xae00, 0x00100000, 0x000ff07c,
+	0xac14, 0x000003ff, 0x0000000f,
+	0xac10, 0xffffffff, 0x7564fdec,
+	0xac0c, 0xffffffff, 0x3120b9a8,
+	0xac08, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+	0xc420, 0xffffffff, 0xfffffffd,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c2a0, 0xffffffff, 0x00000100,
+	0x3c208, 0xffffffff, 0x00000100,
+	0x3c2c0, 0xffffffff, 0x00000100,
+	0x3c2c8, 0xffffffff, 0x00000100,
+	0x3c2c4, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00200100,
+	0x3c280, 0xffffffff, 0x00000100,
+	0x3c214, 0xffffffff, 0x06000100,
+	0x3c220, 0xffffffff, 0x00000100,
+	0x3c218, 0xffffffff, 0x06000100,
+	0x3c204, 0xffffffff, 0x00000100,
+	0x3c2e0, 0xffffffff, 0x00000100,
+	0x3c224, 0xffffffff, 0x00000100,
+	0x3c200, 0xffffffff, 0x00000100,
+	0x3c230, 0xffffffff, 0x00000100,
+	0x3c234, 0xffffffff, 0x00000100,
+	0x3c250, 0xffffffff, 0x00000100,
+	0x3c254, 0xffffffff, 0x00000100,
+	0x3c258, 0xffffffff, 0x00000100,
+	0x3c25c, 0xffffffff, 0x00000100,
+	0x3c260, 0xffffffff, 0x00000100,
+	0x3c27c, 0xffffffff, 0x00000100,
+	0x3c278, 0xffffffff, 0x00000100,
+	0x3c210, 0xffffffff, 0x06000100,
+	0x3c290, 0xffffffff, 0x00000100,
+	0x3c274, 0xffffffff, 0x00000100,
+	0x3c2b4, 0xffffffff, 0x00000100,
+	0x3c2b0, 0xffffffff, 0x00000100,
+	0x3c270, 0xffffffff, 0x00000100,
+	0x30800, 0xffffffff, 0xe0000000,
+	0x3c020, 0xffffffff, 0x00010000,
+	0x3c024, 0xffffffff, 0x00030002,
+	0x3c028, 0xffffffff, 0x00040007,
+	0x3c02c, 0xffffffff, 0x00060005,
+	0x3c030, 0xffffffff, 0x00090008,
+	0x3c034, 0xffffffff, 0x00010000,
+	0x3c038, 0xffffffff, 0x00030002,
+	0x3c03c, 0xffffffff, 0x00040007,
+	0x3c040, 0xffffffff, 0x00060005,
+	0x3c044, 0xffffffff, 0x00090008,
+	0x3c048, 0xffffffff, 0x00010000,
+	0x3c04c, 0xffffffff, 0x00030002,
+	0x3c050, 0xffffffff, 0x00040007,
+	0x3c054, 0xffffffff, 0x00060005,
+	0x3c058, 0xffffffff, 0x00090008,
+	0x3c05c, 0xffffffff, 0x00010000,
+	0x3c060, 0xffffffff, 0x00030002,
+	0x3c064, 0xffffffff, 0x00040007,
+	0x3c068, 0xffffffff, 0x00060005,
+	0x3c06c, 0xffffffff, 0x00090008,
+	0x3c070, 0xffffffff, 0x00010000,
+	0x3c074, 0xffffffff, 0x00030002,
+	0x3c078, 0xffffffff, 0x00040007,
+	0x3c07c, 0xffffffff, 0x00060005,
+	0x3c080, 0xffffffff, 0x00090008,
+	0x3c084, 0xffffffff, 0x00010000,
+	0x3c088, 0xffffffff, 0x00030002,
+	0x3c08c, 0xffffffff, 0x00040007,
+	0x3c090, 0xffffffff, 0x00060005,
+	0x3c094, 0xffffffff, 0x00090008,
+	0x3c098, 0xffffffff, 0x00010000,
+	0x3c09c, 0xffffffff, 0x00030002,
+	0x3c0a0, 0xffffffff, 0x00040007,
+	0x3c0a4, 0xffffffff, 0x00060005,
+	0x3c0a8, 0xffffffff, 0x00090008,
+	0x3c0ac, 0xffffffff, 0x00010000,
+	0x3c0b0, 0xffffffff, 0x00030002,
+	0x3c0b4, 0xffffffff, 0x00040007,
+	0x3c0b8, 0xffffffff, 0x00060005,
+	0x3c0bc, 0xffffffff, 0x00090008,
+	0x3c0c0, 0xffffffff, 0x00010000,
+	0x3c0c4, 0xffffffff, 0x00030002,
+	0x3c0c8, 0xffffffff, 0x00040007,
+	0x3c0cc, 0xffffffff, 0x00060005,
+	0x3c0d0, 0xffffffff, 0x00090008,
+	0x3c0d4, 0xffffffff, 0x00010000,
+	0x3c0d8, 0xffffffff, 0x00030002,
+	0x3c0dc, 0xffffffff, 0x00040007,
+	0x3c0e0, 0xffffffff, 0x00060005,
+	0x3c0e4, 0xffffffff, 0x00090008,
+	0x3c0e8, 0xffffffff, 0x00010000,
+	0x3c0ec, 0xffffffff, 0x00030002,
+	0x3c0f0, 0xffffffff, 0x00040007,
+	0x3c0f4, 0xffffffff, 0x00060005,
+	0x3c0f8, 0xffffffff, 0x00090008,
+	0xc318, 0xffffffff, 0x00020200,
+	0x3350, 0xffffffff, 0x00000200,
+	0x15c0, 0xffffffff, 0x00000400,
+	0x55e8, 0xffffffff, 0x00000000,
+	0x2f50, 0xffffffff, 0x00000902,
+	0x3c000, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc424, 0xffffffff, 0x0020003f,
+	0x38, 0xffffffff, 0x0140001c,
+	0x3c, 0x000f0000, 0x000f0000,
+	0x220, 0xffffffff, 0xc060000c,
+	0x224, 0xc0000fff, 0x00000100,
+	0xf90, 0xffffffff, 0x00000100,
+	0xf98, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd00c, 0xff000ff0, 0x00000100,
+	0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 godavari_golden_registers[] =
+{
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x98302, 0xf00fffff, 0x00000400,
+	0x6130, 0xffffffff, 0x00010000,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x000fffff, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xd014, 0x00010000, 0x00810001,
+	0xd814, 0x00010000, 0x00810001,
+	0x3e78, 0x00000001, 0x00000002,
+	0xc768, 0x00000008, 0x00000008,
+	0xc770, 0x00000f00, 0x00000800,
+	0xc774, 0x00000f00, 0x00000800,
+	0xc798, 0x00ffffff, 0x00ff7fbf,
+	0xc79c, 0x00ffffff, 0x00ff7faf,
+	0x8c00, 0x000000ff, 0x00000001,
+	0x214f8, 0x01ff01ff, 0x00000002,
+	0x21498, 0x007ff800, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x30934, 0xffffffff, 0x00000000
+};
+
+
+static void cik_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_BONAIRE:
+		radeon_program_register_sequence(rdev,
+						 bonaire_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 bonaire_golden_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 bonaire_golden_common_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 bonaire_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+		break;
+	case CHIP_KABINI:
+		radeon_program_register_sequence(rdev,
+						 kalindi_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_common_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		break;
+	case CHIP_MULLINS:
+		radeon_program_register_sequence(rdev,
+						 kalindi_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 godavari_golden_registers,
+						 (const u32)ARRAY_SIZE(godavari_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_common_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		break;
+	case CHIP_KAVERI:
+		radeon_program_register_sequence(rdev,
+						 spectre_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 spectre_golden_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 spectre_golden_common_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 spectre_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+		break;
+	case CHIP_HAWAII:
+		radeon_program_register_sequence(rdev,
+						 hawaii_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_common_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 hawaii_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * cik_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (CIK).
+ */
+u32 cik_get_xclk(struct radeon_device *rdev)
+{
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
+			return reference_clock / 2;
+	} else {
+		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
+			return reference_clock / 4;
+	}
+	return reference_clock;
+}
+
+/**
+ * cik_mm_rdoorbell - read a doorbell dword
+ *
+ * @rdev: radeon_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
+{
+	if (index < rdev->doorbell.num_doorbells) {
+		return readl(rdev->doorbell.ptr + index);
+	} else {
+		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+		return 0;
+	}
+}
+
+/**
+ * cik_mm_wdoorbell - write a doorbell dword
+ *
+ * @rdev: radeon_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
+{
+	if (index < rdev->doorbell.num_doorbells) {
+		writel(v, rdev->doorbell.ptr + index);
+	} else {
+		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+	}
+}
+
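+/*
+ * MC sequencer I/O debug defaults: {index, data} pairs written through
+ * MC_SEQ_IO_DEBUG_INDEX/DATA before the MC ucode itself is loaded (see
+ * ci_mc_load_microcode() below).
+ */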
+#define BONAIRE_IO_MC_REGS_SIZE 36
+
+static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
+{
+	{0x00000070, 0x04400000},
+	{0x00000071, 0x80c01803},
+	{0x00000072, 0x00004004},
+	{0x00000073, 0x00000100},
+	{0x00000074, 0x00ff0000},
+	{0x00000075, 0x34000000},
+	{0x00000076, 0x08000014},
+	{0x00000077, 0x00cc08ec},
+	{0x00000078, 0x00000400},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x04090000},
+	{0x0000007c, 0x00000000},
+	{0x0000007e, 0x4408a8e8},
+	{0x0000007f, 0x00000304},
+	{0x00000080, 0x00000000},
+	{0x00000082, 0x00000001},
+	{0x00000083, 0x00000002},
+	{0x00000084, 0xf3e4f400},
+	{0x00000085, 0x052024e3},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x01000000},
+	{0x0000008a, 0x1c0a0000},
+	{0x0000008b, 0xff010000},
+	{0x0000008d, 0xffffefff},
+	{0x0000008e, 0xfff3efff},
+	{0x0000008f, 0xfff3efbf},
+	{0x00000092, 0xf7ffffff},
+	{0x00000093, 0xffffff7f},
+	{0x00000095, 0x00101101},
+	{0x00000096, 0x00000fff},
+	{0x00000097, 0x00116fff},
+	{0x00000098, 0x60010000},
+	{0x00000099, 0x10010000},
+	{0x0000009a, 0x00006000},
+	{0x0000009b, 0x00001000},
+	{0x0000009f, 0x00b48000}
+};
+
+#define HAWAII_IO_MC_REGS_SIZE 22
+
+static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
+{
+	{0x0000007d, 0x40000000},
+	{0x0000007e, 0x40180304},
+	{0x0000007f, 0x0000ff00},
+	{0x00000081, 0x00000000},
+	{0x00000083, 0x00000800},
+	{0x00000086, 0x00000000},
+	{0x00000087, 0x00000100},
+	{0x00000088, 0x00020100},
+	{0x00000089, 0x00000000},
+	{0x0000008b, 0x00040000},
+	{0x0000008c, 0x00000100},
+	{0x0000008e, 0xff010000},
+	{0x00000090, 0xffffefff},
+	{0x00000091, 0xfff3efff},
+	{0x00000092, 0xfff3efbf},
+	{0x00000093, 0xf7ffffff},
+	{0x00000094, 0xffffff7f},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x0000009f, 0x00c79000}
+};
+
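+/*
+ * Both tables above are (index, data) pairs: ci_mc_load_microcode() below
+ * writes each index to MC_SEQ_IO_DEBUG_INDEX and the matching value to
+ * MC_SEQ_IO_DEBUG_DATA.
+ */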
+
+/**
+ * cik_srbm_select - select specific register instances
+ *
+ * @rdev: radeon_device pointer
+ * @me: selected ME (micro engine)
+ * @pipe: pipe
+ * @queue: queue
+ * @vmid: VMID
+ *
+ * Switches the currently active register instances.  Some
+ * registers are instanced per VMID, others are instanced per
+ * me/pipe/queue combination.
+ */
+static void cik_srbm_select(struct radeon_device *rdev,
+			    u32 me, u32 pipe, u32 queue, u32 vmid)
+{
+	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
+			     MEID(me & 0x3) |
+			     VMID(vmid & 0xf) |
+			     QUEUEID(queue & 0x7));
+	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
+}
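+
+/*
+ * Callers typically bracket access to instanced registers with a select
+ * and a reset back to instance 0, e.g. (illustrative):
+ *
+ *	mutex_lock(&rdev->srbm_mutex);
+ *	cik_srbm_select(rdev, me, pipe, queue, 0);
+ *	... program per-queue registers ...
+ *	cik_srbm_select(rdev, 0, 0, 0, 0);
+ *	mutex_unlock(&rdev->srbm_mutex);
+ */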
+
+/* ucode loading */
+/**
+ * ci_mc_load_microcode - load MC ucode into the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Load the GDDR MC ucode into the hw (CIK).
+ * Returns 0 on success, error on failure.
+ */
+int ci_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data = NULL;
+	const __le32 *new_fw_data = NULL;
+	u32 running, tmp;
+	u32 *io_mc_regs = NULL;
+	const __le32 *new_io_mc_regs = NULL;
+	int i, regs_size, ucode_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	if (rdev->new_fw) {
+		const struct mc_firmware_header_v1_0 *hdr =
+			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+		radeon_ucode_print_mc_hdr(&hdr->header);
+
+		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+		new_io_mc_regs = (const __le32 *)
+			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		new_fw_data = (const __le32 *)
+			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		ucode_size = rdev->mc_fw->size / 4;
+
+		switch (rdev->family) {
+		case CHIP_BONAIRE:
+			io_mc_regs = (u32 *)&bonaire_io_mc_regs;
+			regs_size = BONAIRE_IO_MC_REGS_SIZE;
+			break;
+		case CHIP_HAWAII:
+			io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+			regs_size = HAWAII_IO_MC_REGS_SIZE;
+			break;
+		default:
+			return -EINVAL;
+		}
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+	}
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			if (rdev->new_fw) {
+				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+			} else {
+				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+			}
+		}
+
+		tmp = RREG32(MC_SEQ_MISC0);
+		if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
+		}
+
+		/* load the MC ucode */
+		for (i = 0; i < ucode_size; i++) {
+			if (rdev->new_fw)
+				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+			else
+				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+		}
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cik_init_microcode - load ucode images from disk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Use the request_firmware() interface to load the ucode images into
+ * the driver (they are not loaded into the hw at this point).
+ * Returns 0 on success, error on failure.
+ */
+static int cik_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *new_chip_name;
+	size_t pfp_req_size, me_req_size, ce_req_size,
+		mec_req_size, rlc_req_size, mc_req_size = 0,
+		sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
+	char fw_name[30];
+	int new_fw = 0;
+	int err;
+	int num_fw;
+	bool new_smc = false;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_BONAIRE:
+		chip_name = "BONAIRE";
+		if ((rdev->pdev->revision == 0x80) ||
+		    (rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->device == 0x665f))
+			new_smc = true;
+		new_chip_name = "bonaire";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
+		mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
+		mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
+		num_fw = 8;
+		break;
+	case CHIP_HAWAII:
+		chip_name = "HAWAII";
+		if (rdev->pdev->revision == 0x80)
+			new_smc = true;
+		new_chip_name = "hawaii";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
+		mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
+		mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+		num_fw = 8;
+		break;
+	case CHIP_KAVERI:
+		chip_name = "KAVERI";
+		new_chip_name = "kaveri";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = KV_RLC_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		num_fw = 7;
+		break;
+	case CHIP_KABINI:
+		chip_name = "KABINI";
+		new_chip_name = "kabini";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		num_fw = 6;
+		break;
+	case CHIP_MULLINS:
+		chip_name = "MULLINS";
+		new_chip_name = "mullins";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = ML_RLC_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		num_fw = 6;
+		break;
+	default:
+		BUG();
+	}
+
+	DRM_INFO("Loading %s Microcode\n", new_chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->pfp_fw->size != pfp_req_size) {
+			pr_err("cik_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->pfp_fw->size, fw_name);
+			err = -EINVAL;
+			goto out;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->pfp_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+		err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->me_fw->size != me_req_size) {
+			pr_err("cik_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->me_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->me_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
+	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+		err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->ce_fw->size != ce_req_size) {
+			pr_err("cik_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->ce_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->ce_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
+	err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+		err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->mec_fw->size != mec_req_size) {
+			pr_err("cik_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mec_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->mec_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	if (rdev->family == CHIP_KAVERI) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
+		err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
+		if (err) {
+			goto out;
+		} else {
+			err = radeon_ucode_validate(rdev->mec2_fw);
+			if (err) {
+				goto out;
+			} else {
+				new_fw++;
+			}
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+		err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->rlc_fw->size != rlc_req_size) {
+			pr_err("cik_rlc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->rlc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->rlc_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
+	err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+		err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->sdma_fw->size != sdma_req_size) {
+			pr_err("cik_sdma: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->sdma_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->sdma_fw);
+		if (err) {
+			pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	/* No SMC or MC ucode on APUs */
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+		if (err) {
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+			err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+			if (err) {
+				snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+				err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+				if (err)
+					goto out;
+			}
+			if ((rdev->mc_fw->size != mc_req_size) &&
+			    (rdev->mc_fw->size != mc2_req_size)) {
+				pr_err("cik_mc: Bogus length %zu in firmware \"%s\"\n",
+				       rdev->mc_fw->size, fw_name);
+				err = -EINVAL;
+			}
+			DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+		} else {
+			err = radeon_ucode_validate(rdev->mc_fw);
+			if (err) {
+				pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+				       fw_name);
+				goto out;
+			} else {
+				new_fw++;
+			}
+		}
+
+		if (new_smc)
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+		else
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+		if (err) {
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+			err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+			if (err) {
+				pr_err("smc: error loading firmware \"%s\"\n",
+				       fw_name);
+				release_firmware(rdev->smc_fw);
+				rdev->smc_fw = NULL;
+				err = 0;
+			} else if (rdev->smc_fw->size != smc_req_size) {
+				pr_err("cik_smc: Bogus length %zu in firmware \"%s\"\n",
+				       rdev->smc_fw->size, fw_name);
+				err = -EINVAL;
+			}
+		} else {
+			err = radeon_ucode_validate(rdev->smc_fw);
+			if (err) {
+				pr_err("cik_fw: validation failed for firmware \"%s\"\n",
+				       fw_name);
+				goto out;
+			} else {
+				new_fw++;
+			}
+		}
+	}
+
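+	/*
+	 * Either every image validated as new-format or none did; a partial
+	 * mix of new and old files is rejected below.
+	 */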
+	if (new_fw == 0) {
+		rdev->new_fw = false;
+	} else if (new_fw < num_fw) {
+		pr_err("ci_fw: mixing new and old firmware!\n");
+		err = -EINVAL;
+	} else {
+		rdev->new_fw = true;
+	}
+
+out:
+	if (err) {
+		if (err != -EINVAL)
+			pr_err("cik_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->ce_fw);
+		rdev->ce_fw = NULL;
+		release_firmware(rdev->mec_fw);
+		rdev->mec_fw = NULL;
+		release_firmware(rdev->mec2_fw);
+		rdev->mec2_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->sdma_fw);
+		rdev->sdma_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+		release_firmware(rdev->smc_fw);
+		rdev->smc_fw = NULL;
+	}
+	return err;
+}
+
+/*
+ * Core functions
+ */
+/**
+ * cik_tiling_mode_table_init - init the hw tiling table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Starting with SI, the tiling setup is done globally in a
+ * set of 32 tiling modes.  Rather than selecting each set of
+ * parameters per surface as on older asics, we just select
+ * which index in the tiling table we want to use, and the
+ * surface uses those parameters (CIK).
+ */
+static void cik_tiling_mode_table_init(struct radeon_device *rdev)
+{
+	u32 *tile = rdev->config.cik.tile_mode_array;
+	u32 *macrotile = rdev->config.cik.macrotile_mode_array;
+	const u32 num_tile_mode_states =
+			ARRAY_SIZE(rdev->config.cik.tile_mode_array);
+	const u32 num_secondary_tile_mode_states =
+			ARRAY_SIZE(rdev->config.cik.macrotile_mode_array);
+	u32 reg_offset, split_equal_to_row_size;
+	u32 num_pipe_configs;
+	u32 num_rbs = rdev->config.cik.max_backends_per_se *
+		rdev->config.cik.max_shader_engines;
+
+	switch (rdev->config.cik.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	num_pipe_configs = rdev->config.cik.max_tile_pipes;
+	if (num_pipe_configs > 8)
+		num_pipe_configs = 16;
+
+	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+		tile[reg_offset] = 0;
+	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+		macrotile[reg_offset] = 0;
+
+	switch (num_pipe_configs) {
+	case 16:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			   NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			   NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			   NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+			   NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			    NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			    NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			    NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			    NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+			    NUM_BANKS(ADDR_SURF_2_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	case 8:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	case 4:
+		if (num_rbs == 4) {
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		} else if (num_rbs < 4) {
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		}
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	case 2:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P2));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2));
+		tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	default:
+		DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
+	}
+}
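+
+/*
+ * Illustrative note: surfaces reference these entries by index rather than
+ * by value; e.g. a depth buffer programmed with tile mode index 4 inherits
+ * TILE_SPLIT(split_equal_to_row_size) from the table above.
+ */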
+
+/**
+ * cik_select_se_sh - select which SE, SH to address
+ *
+ * @rdev: radeon_device pointer
+ * @se_num: shader engine to address
+ * @sh_num: sh block to address
+ *
+ * Select which SE, SH combinations to address. Certain
+ * registers are instanced per SE or SH.  0xffffffff means
+ * broadcast to all SEs or SHs (CIK).
+ */
+static void cik_select_se_sh(struct radeon_device *rdev,
+			     u32 se_num, u32 sh_num)
+{
+	u32 data = INSTANCE_BROADCAST_WRITES;
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
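+
+/*
+ * Example (illustrative): cik_setup_rb() below walks every instance with
+ * cik_select_se_sh(rdev, i, j) and then restores broadcast mode with
+ * cik_select_se_sh(rdev, 0xffffffff, 0xffffffff).
+ */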
+
+/**
+ * cik_create_bitmask - create a bitmask
+ *
+ * @bit_width: length of the mask
+ *
+ * Create a variable-length bit mask (CIK).
+ * Returns the bitmask.
+ */
+static u32 cik_create_bitmask(u32 bit_width)
+{
+	u32 i, mask = 0;
+
+	for (i = 0; i < bit_width; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+	return mask;
+}
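+
+/*
+ * Note: equivalent to ((1 << bit_width) - 1) for bit_width < 32; the loop
+ * form also yields 0xffffffff for bit_width == 32 without relying on an
+ * undefined 32-bit shift.
+ */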
+
+/**
+ * cik_get_rb_disabled - computes the mask of disabled RBs
+ *
+ * @rdev: radeon_device pointer
+ * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
+ * @sh_per_se: number of SH blocks per SE for the asic
+ *
+ * Calculates the bitmask of disabled RBs (CIK).
+ * Returns the disabled RB bitmask.
+ */
+static u32 cik_get_rb_disabled(struct radeon_device *rdev,
+			      u32 max_rb_num_per_se,
+			      u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	if (data & 1)
+		data &= BACKEND_DISABLE_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+	return data & mask;
+}
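+
+/*
+ * Worked example (illustrative): with 2 RBs per SE and 1 SH per SE the
+ * mask is 0x3, so a raw value of 0x2 means the second RB of the selected
+ * SE/SH is fused off or disabled.
+ */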
+
+/**
+ * cik_setup_rb - setup the RBs on the asic
+ *
+ * @rdev: radeon_device pointer
+ * @se_num: number of SEs (shader engines) for the asic
+ * @sh_per_se: number of SH blocks per SE for the asic
+ * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
+ *
+ * Configures per-SE/SH RB registers (CIK).
+ */
+static void cik_setup_rb(struct radeon_device *rdev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 max_rb_num_per_se)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			cik_select_se_sh(rdev, i, j);
+			data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
+			if (rdev->family == CHIP_HAWAII)
+				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
+			else
+				disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	rdev->config.cik.backend_enable_mask = enabled_rbs;
+
+	for (i = 0; i < se_num; i++) {
+		cik_select_se_sh(rdev, i, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 0:
+				if (j == 0)
+					data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
+				else
+					data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
+				break;
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+/**
+ * cik_gpu_init - setup the 3D engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Configures the 3D engine and tiling configuration
+ * registers so that the 3D engine is usable.
+ */
+static void cik_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_BONAIRE:
+		rdev->config.cik.max_shader_engines = 2;
+		rdev->config.cik.max_tile_pipes = 4;
+		rdev->config.cik.max_cu_per_sh = 7;
+		rdev->config.cik.max_sh_per_se = 1;
+		rdev->config.cik.max_backends_per_se = 2;
+		rdev->config.cik.max_texture_channel_caches = 4;
+		rdev->config.cik.max_gprs = 256;
+		rdev->config.cik.max_gs_threads = 32;
+		rdev->config.cik.max_hw_contexts = 8;
+
+		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAWAII:
+		rdev->config.cik.max_shader_engines = 4;
+		rdev->config.cik.max_tile_pipes = 16;
+		rdev->config.cik.max_cu_per_sh = 11;
+		rdev->config.cik.max_sh_per_se = 1;
+		rdev->config.cik.max_backends_per_se = 4;
+		rdev->config.cik.max_texture_channel_caches = 16;
+		rdev->config.cik.max_gprs = 256;
+		rdev->config.cik.max_gs_threads = 32;
+		rdev->config.cik.max_hw_contexts = 8;
+
+		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_KAVERI:
+		rdev->config.cik.max_shader_engines = 1;
+		rdev->config.cik.max_tile_pipes = 4;
+		rdev->config.cik.max_cu_per_sh = 8;
+		rdev->config.cik.max_backends_per_se = 2;
+		rdev->config.cik.max_sh_per_se = 1;
+		rdev->config.cik.max_texture_channel_caches = 4;
+		rdev->config.cik.max_gprs = 256;
+		rdev->config.cik.max_gs_threads = 16;
+		rdev->config.cik.max_hw_contexts = 8;
+
+		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	default:
+		rdev->config.cik.max_shader_engines = 1;
+		rdev->config.cik.max_tile_pipes = 2;
+		rdev->config.cik.max_cu_per_sh = 2;
+		rdev->config.cik.max_sh_per_se = 1;
+		rdev->config.cik.max_backends_per_se = 1;
+		rdev->config.cik.max_texture_channel_caches = 2;
+		rdev->config.cik.max_gprs = 256;
+		rdev->config.cik.max_gs_threads = 16;
+		rdev->config.cik.max_hw_contexts = 8;
+
+		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
+	rdev->config.cik.mem_max_burst_length_bytes = 256;
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.cik.mem_row_size_in_kb > 4)
+		rdev->config.cik.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.cik.shader_engine_tile_size = 32;
+	rdev->config.cik.num_gpus = 1;
+	rdev->config.cik.multi_gpu_tile_size = 64;
+
+	/* fix up row size */
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (rdev->config.cik.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.cik.tile_config = 0;
+	switch (rdev->config.cik.num_tile_pipes) {
+	case 1:
+		rdev->config.cik.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.cik.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.cik.tile_config |= (2 << 0);
+		break;
+	case 8:
+	default:
+		/* XXX what about 12? */
+		rdev->config.cik.tile_config |= (3 << 0);
+		break;
+	}
+	rdev->config.cik.tile_config |=
+		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+	rdev->config.cik.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.cik.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
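+
+	/*
+	 * Illustrative decode (hypothetical helper logic, not used by the
+	 * driver): the pipe count packed above can be recovered as
+	 *	num_pipes = 1 << (tile_config & 0xf);
+	 * bits 7:4, 11:8 and 15:12 carry the raw NOOFBANK,
+	 * PIPE_INTERLEAVE_SIZE and ROW_SIZE fields read from the hw.
+	 */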
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
+	WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
+	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+	cik_tiling_mode_table_init(rdev);
+
+	cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
+		     rdev->config.cik.max_sh_per_se,
+		     rdev->config.cik.max_backends_per_se);
+
+	rdev->config.cik.active_cus = 0;
+	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+			rdev->config.cik.active_cus +=
+				hweight32(cik_get_cu_active_bitmap(rdev, i, j));
+		}
+	}
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	WREG32(SX_DEBUG_1, 0x20);
+
+	WREG32(TA_CNTL_AUX, 0x00010000);
+
+	tmp = RREG32(SPI_CONFIG_CNTL);
+	tmp |= 0x03000000;
+	WREG32(SPI_CONFIG_CNTL, tmp);
+
+	WREG32(SQ_CONFIG, 1);
+
+	WREG32(DB_DEBUG, 0);
+
+	tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
+	tmp |= 0x00000400;
+	WREG32(DB_DEBUG2, tmp);
+
+	tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
+	tmp |= 0x00020200;
+	WREG32(DB_DEBUG3, tmp);
+
+	tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
+	tmp |= 0x00018208;
+	WREG32(CB_HW_CONTROL, tmp);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_CONFIG, 0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+	WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
+
+	udelay(50);
+}
+
+/*
+ * GPU scratch register helper functions.
+ */
+/**
+ * cik_scratch_init - setup driver info for CP scratch regs
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the number and offset of the CP scratch registers.
+ * NOTE: use of CP scratch registers is a legacy interface and
+ * is not used by default on newer asics (r6xx+).  On newer asics,
+ * memory buffers are used for fences rather than scratch regs.
+ */
+static void cik_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+/**
+ * cik_ring_test - basic gfx ring test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Allocate a scratch register and write to it using the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that the ring is working.
+ * Used by cik_cp_gfx_resume().
+ * Returns 0 on success, error on failure.
+ */
+int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+				       int ridx)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	u32 ref_and_mask;
+
+	switch (ring->idx) {
+	case CAYMAN_RING_TYPE_CP1_INDEX:
+	case CAYMAN_RING_TYPE_CP2_INDEX:
+	default:
+		switch (ring->me) {
+		case 0:
+			ref_and_mask = CP2 << ring->pipe;
+			break;
+		case 1:
+			ref_and_mask = CP6 << ring->pipe;
+			break;
+		default:
+			return;
+		}
+		break;
+	case RADEON_RING_TYPE_GFX_INDEX:
+		ref_and_mask = CP0;
+		break;
+	}
+
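+	/* the PFP writes ref_and_mask to GPU_HDP_FLUSH_REQ, then polls
+	 * GPU_HDP_FLUSH_DONE until the HDP flush has completed.
+	 */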
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
+				 WAIT_REG_MEM_ENGINE(1)));   /* pfp */
+	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+	radeon_ring_write(ring, ref_and_mask);
+	radeon_ring_write(ring, ref_and_mask);
+	radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
+ * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Emits a fence sequence number on the gfx ring and flushes
+ * GPU caches.
+ */
+void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+			     struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* Workaround for cache flush problems. First send a dummy EOP
+	 * event down the pipe with a seq number one below the real one.
+	 */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(1) | INT_SEL(0));
+	radeon_ring_write(ring, fence->seq - 1);
+	radeon_ring_write(ring, 0);
+
+	/* Then send the real EOP event down the pipe. */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+/**
+ * cik_fence_compute_ring_emit - emit a fence on the compute ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Emits a fence sequence number on the compute ring and flushes
+ * GPU caches.
+ */
+void cik_fence_compute_ring_emit(struct radeon_device *rdev,
+				 struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* RELEASE_MEM - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
+	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+	if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+		/* Prevent the PFP from running ahead of the semaphore wait */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		radeon_ring_write(ring, 0x0);
+	}
+
+	return true;
+}
+
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.blit_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes, control;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
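+	/* each pass below emits a 7-dword DMA_DATA packet (at most 0x1fffff
+	 * bytes per packet); the extra 18 dwords leave headroom for the sync
+	 * and fence packets.
+	 */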
+	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0x1fffff)
+			cur_size_in_bytes = 0x1fffff;
+		size_in_bytes -= cur_size_in_bytes;
+		control = 0;
+		if (size_in_bytes == 0)
+			control |= PACKET3_DMA_DATA_CP_SYNC;
+		radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+		radeon_ring_write(ring, control);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
+		radeon_ring_write(ring, cur_size_in_bytes);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
+
+/*
+ * IB stuff
+ */
+/**
+ * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: radeon indirect buffer object
+ *
+ * Emits a DE (drawing engine) or CE (constant engine) IB
+ * on the gfx ring.  IBs are usually generated by userspace
+ * acceleration drivers and submitted to the kernel for
+ * scheduling on the ring.  This function schedules the IB
+ * on the gfx ring for execution by the GPU.
+ */
+void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
+	u32 header, control = INDIRECT_BUFFER_VALID;
+
+	if (ib->is_const_ib) {
+		/* set switch buffer packet before const IB */
+		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		radeon_ring_write(ring, 0);
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	} else {
+		u32 next_rptr;
+		if (ring->rptr_save_reg) {
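+			/* 3 dwords for the SET_UCONFIG_REG write, 4 for the IB packet itself */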
+			next_rptr = ring->wptr + 3 + 4;
+			radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+			radeon_ring_write(ring, ((ring->rptr_save_reg -
+						  PACKET3_SET_UCONFIG_REG_START) >> 2));
+			radeon_ring_write(ring, next_rptr);
+		} else if (rdev->wb.enabled) {
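+			/* 5 dwords for the WRITE_DATA packet, 4 for the IB packet itself */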
+			next_rptr = ring->wptr + 5 + 4;
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+			radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
+			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+			radeon_ring_write(ring, next_rptr);
+		}
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+	}
+
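+	/* IB length in dwords goes in the low bits, the VM ID in bits 31:24 */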
+	control |= ib->length_dw | (vm_id << 24);
+
+	radeon_ring_write(ring, header);
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	radeon_ring_write(ring, control);
+}
+
+/**
+ * cik_ib_test - basic gfx ring IB test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		radeon_scratch_free(rdev, scratch);
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		radeon_scratch_free(rdev, scratch);
+		radeon_ib_free(rdev, &ib);
+		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		radeon_scratch_free(rdev, scratch);
+		radeon_ib_free(rdev, &ib);
+		return -ETIMEDOUT;
+	}
+	r = 0;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/*
+ * CP.
+ * On CIK, gfx and compute now have independent command processors.
+ *
+ * GFX
+ * Gfx consists of a single ring and can process both gfx jobs and
+ * compute jobs.  The gfx CP consists of three microengines (ME):
+ * PFP - Pre-Fetch Parser
+ * ME - Micro Engine
+ * CE - Constant Engine
+ * The PFP and ME make up what is considered the Drawing Engine (DE).
+ * The CE is an asynchronous engine used for updating buffer descriptors
+ * used by the DE so that they can be loaded into cache in parallel
+ * while the DE is processing state update packets.
+ *
+ * Compute
+ * The compute CP consists of two microengines (ME):
+ * MEC1 - Compute MicroEngine 1
+ * MEC2 - Compute MicroEngine 2
+ * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
+ * The queues are exposed to userspace and are programmed directly
+ * by the compute runtime.
+ */
+/**
+ * cik_cp_gfx_enable - enable/disable the gfx CP MEs
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the gfx MEs.
+ */
+static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	}
+	udelay(50);
+}
+
+/**
+ * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the gfx PFP, ME, and CE ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+		return -EINVAL;
+
+	cik_cp_gfx_enable(rdev, false);
+
+	if (rdev->new_fw) {
+		const struct gfx_firmware_header_v1_0 *pfp_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+		const struct gfx_firmware_header_v1_0 *ce_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+		const struct gfx_firmware_header_v1_0 *me_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+		const __le32 *fw_data;
+		u32 fw_size;
+
+		radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+		radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+		radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+		/* PFP */
+		fw_data = (const __le32 *)
+			(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
+
+		/* CE */
+		fw_data = (const __le32 *)
+			(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_CE_UCODE_ADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
+
+		/* ME */
+		fw_data = (const __le32 *)
+			(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_ME_RAM_WADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
+		WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
+	} else {
+		const __be32 *fw_data;
+
+		/* PFP */
+		fw_data = (const __be32 *)rdev->pfp_fw->data;
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+		for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
+			WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+
+		/* CE */
+		fw_data = (const __be32 *)rdev->ce_fw->data;
+		WREG32(CP_CE_UCODE_ADDR, 0);
+		for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
+			WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_CE_UCODE_ADDR, 0);
+
+		/* ME */
+		fw_data = (const __be32 *)rdev->me_fw->data;
+		WREG32(CP_ME_RAM_WADDR, 0);
+		for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
+			WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_ME_RAM_WADDR, 0);
+	}
+
+	return 0;
+}
+
+/**
+ * cik_cp_gfx_start - start the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Enables the ring and loads the clear state context and other
+ * packets required to init the ring.
+ * Returns 0 for success, error for failure.
+ */
+static int cik_cp_gfx_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	/* init the CP */
+	WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
+	WREG32(CP_ENDIAN_SWAP, 0);
+	WREG32(CP_DEVICE_ID, 1);
+
+	cik_cp_gfx_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* init the CE partitions.  CE only used for gfx on CIK */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	radeon_ring_write(ring, 0x8000);
+	radeon_ring_write(ring, 0x8000);
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);
+
+	for (i = 0; i < cik_default_size; i++)
+		radeon_ring_write(ring, cik_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	return 0;
+}
+
+/**
+ * cik_cp_gfx_fini - stop the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the gfx ring and tear down the driver ring
+ * info.
+ */
+static void cik_cp_gfx_fini(struct radeon_device *rdev)
+{
+	cik_cp_gfx_enable(rdev, false);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+}
+
+/**
+ * cik_cp_gfx_resume - setup the gfx ring buffer registers
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Program the location and size of the gfx ring buffer
+ * and test it to make sure it's working.
+ * Returns 0 for success, error for failure.
+ */
+static int cik_cp_gfx_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	u64 rb_addr;
+	int r;
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	if (rdev->family != CHIP_HAWAII)
+		WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* set the RB to use vmid 0 */
+	WREG32(CP_RB_VMID, 0);
+
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+
+	/* scratch register shadowing is no longer supported */
+	WREG32(SCRATCH_UMSK, 0);
+
+	if (!rdev->wb.enabled)
+		tmp |= RB_NO_UPDATE;
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	rb_addr = ring->gpu_addr >> 8;
+	WREG32(CP_RB0_BASE, rb_addr);
+	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
+
+	/* start the ring */
+	cik_cp_gfx_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		return r;
+	}
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(CP_RB0_RPTR);
+
+	return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring)
+{
+	return RREG32(CP_RB0_WPTR);
+}
+
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	WREG32(CP_RB0_WPTR, ring->wptr);
+	(void)RREG32(CP_RB0_WPTR);
+}
+
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		mutex_lock(&rdev->srbm_mutex);
+		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+		rptr = RREG32(CP_HQD_PQ_RPTR);
+		cik_srbm_select(rdev, 0, 0, 0, 0);
+		mutex_unlock(&rdev->srbm_mutex);
+	}
+
+	return rptr;
+}
+
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	if (rdev->wb.enabled) {
+		/* XXX check if swapping is necessary on BE */
+		wptr = rdev->wb.wb[ring->wptr_offs/4];
+	} else {
+		mutex_lock(&rdev->srbm_mutex);
+		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+		wptr = RREG32(CP_HQD_PQ_WPTR);
+		cik_srbm_select(rdev, 0, 0, 0, 0);
+		mutex_unlock(&rdev->srbm_mutex);
+	}
+
+	return wptr;
+}
+
+void cik_compute_set_wptr(struct radeon_device *rdev,
+			  struct radeon_ring *ring)
+{
+	/* XXX check if swapping is necessary on BE */
+	rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
+	WDOORBELL32(ring->doorbell_index, ring->wptr);
+}
+
+static void cik_compute_stop(struct radeon_device *rdev,
+			     struct radeon_ring *ring)
+{
+	u32 j, tmp;
+
+	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+	/* Disable wptr polling. */
+	tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+	tmp &= ~WPTR_POLL_EN;
+	WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+	/* Disable HQD. */
+	if (RREG32(CP_HQD_ACTIVE) & 1) {
+		WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+		for (j = 0; j < rdev->usec_timeout; j++) {
+			if (!(RREG32(CP_HQD_ACTIVE) & 1))
+				break;
+			udelay(1);
+		}
+		WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
+		WREG32(CP_HQD_PQ_RPTR, 0);
+		WREG32(CP_HQD_PQ_WPTR, 0);
+	}
+	cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
+/**
+ * cik_cp_compute_enable - enable/disable the compute CP MEs
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the compute MEs.
+ */
+static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_MEC_CNTL, 0);
+	else {
+		/*
+		 * To make hibernation reliable we need to clear compute ring
+		 * configuration before halting the compute ring.
+		 */
+		mutex_lock(&rdev->srbm_mutex);
+		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+		mutex_unlock(&rdev->srbm_mutex);
+
+		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+	udelay(50);
+}
+
+/**
+ * cik_cp_compute_load_microcode - load the compute CP ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the compute MEC1 (and, on Kaveri, MEC2) ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->mec_fw)
+		return -EINVAL;
+
+	cik_cp_compute_enable(rdev, false);
+
+	if (rdev->new_fw) {
+		const struct gfx_firmware_header_v1_0 *mec_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+		const __le32 *fw_data;
+		u32 fw_size;
+
+		radeon_ucode_print_gfx_hdr(&mec_hdr->header);
+
+		/* MEC1 */
+		fw_data = (const __le32 *)
+			(rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
+
+		/* MEC2 */
+		if (rdev->family == CHIP_KAVERI) {
+			const struct gfx_firmware_header_v1_0 *mec2_hdr =
+				(const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+
+			fw_data = (const __le32 *)
+				(rdev->mec2_fw->data +
+				 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+			fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+			WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+			for (i = 0; i < fw_size; i++)
+				WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+			WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
+		}
+	} else {
+		const __be32 *fw_data;
+
+		/* MEC1 */
+		fw_data = (const __be32 *)rdev->mec_fw->data;
+		WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+		for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+			WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+
+		if (rdev->family == CHIP_KAVERI) {
+			/* MEC2 */
+			fw_data = (const __be32 *)rdev->mec_fw->data;
+			WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+			for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+				WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
+			WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cik_cp_compute_start - start the compute queues
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Enable the compute queues.
+ * Returns 0 for success, error for failure.
+ */
+static int cik_cp_compute_start(struct radeon_device *rdev)
+{
+	cik_cp_compute_enable(rdev, true);
+
+	return 0;
+}
+
+/**
+ * cik_cp_compute_fini - stop the compute queues
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the compute queues and tear down the driver queue
+ * info.
+ */
+static void cik_cp_compute_fini(struct radeon_device *rdev)
+{
+	int i, idx, r;
+
+	cik_cp_compute_enable(rdev, false);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			idx = CAYMAN_RING_TYPE_CP1_INDEX;
+		else
+			idx = CAYMAN_RING_TYPE_CP2_INDEX;
+
+		if (rdev->ring[idx].mqd_obj) {
+			r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
+			if (unlikely(r != 0))
+				dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
+
+			radeon_bo_unpin(rdev->ring[idx].mqd_obj);
+			radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
+
+			radeon_bo_unref(&rdev->ring[idx].mqd_obj);
+			rdev->ring[idx].mqd_obj = NULL;
+		}
+	}
+}
+
+static void cik_mec_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->mec.hpd_eop_obj) {
+		r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
+		radeon_bo_unpin(rdev->mec.hpd_eop_obj);
+		radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
+
+		radeon_bo_unref(&rdev->mec.hpd_eop_obj);
+		rdev->mec.hpd_eop_obj = NULL;
+	}
+}
+
+#define MEC_HPD_SIZE 2048
+
+static int cik_mec_init(struct radeon_device *rdev)
+{
+	int r;
+	u32 *hpd;
+
+	/*
+	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
+	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+	 */
+	if (rdev->family == CHIP_KAVERI)
+		rdev->mec.num_mec = 2;
+	else
+		rdev->mec.num_mec = 1;
+	rdev->mec.num_pipe = 4;
+	rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
+
+	if (rdev->mec.hpd_eop_obj == NULL) {
+		r = radeon_bo_create(rdev,
+				     rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
+				     PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     &rdev->mec.hpd_eop_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
+	if (unlikely(r != 0)) {
+		cik_mec_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
+			  &rdev->mec.hpd_eop_gpu_addr);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) pin HDP EOP bo failed\n", r);
+		cik_mec_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) map HDP EOP bo failed\n", r);
+		cik_mec_fini(rdev);
+		return r;
+	}
+
+	/* clear memory.  Not sure if this is required or not */
+	memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
+
+	radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
+	radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
+
+	return 0;
+}
+
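+/* Hardware Queue Descriptor (HQD): mirrors the CP_HQD_* register block of one compute queue */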
+struct hqd_registers {
+	u32 cp_mqd_base_addr;
+	u32 cp_mqd_base_addr_hi;
+	u32 cp_hqd_active;
+	u32 cp_hqd_vmid;
+	u32 cp_hqd_persistent_state;
+	u32 cp_hqd_pipe_priority;
+	u32 cp_hqd_queue_priority;
+	u32 cp_hqd_quantum;
+	u32 cp_hqd_pq_base;
+	u32 cp_hqd_pq_base_hi;
+	u32 cp_hqd_pq_rptr;
+	u32 cp_hqd_pq_rptr_report_addr;
+	u32 cp_hqd_pq_rptr_report_addr_hi;
+	u32 cp_hqd_pq_wptr_poll_addr;
+	u32 cp_hqd_pq_wptr_poll_addr_hi;
+	u32 cp_hqd_pq_doorbell_control;
+	u32 cp_hqd_pq_wptr;
+	u32 cp_hqd_pq_control;
+	u32 cp_hqd_ib_base_addr;
+	u32 cp_hqd_ib_base_addr_hi;
+	u32 cp_hqd_ib_rptr;
+	u32 cp_hqd_ib_control;
+	u32 cp_hqd_iq_timer;
+	u32 cp_hqd_iq_rptr;
+	u32 cp_hqd_dequeue_request;
+	u32 cp_hqd_dma_offload;
+	u32 cp_hqd_sema_cmd;
+	u32 cp_hqd_msg_type;
+	u32 cp_hqd_atomic0_preop_lo;
+	u32 cp_hqd_atomic0_preop_hi;
+	u32 cp_hqd_atomic1_preop_lo;
+	u32 cp_hqd_atomic1_preop_hi;
+	u32 cp_hqd_hq_scheduler0;
+	u32 cp_hqd_hq_scheduler1;
+	u32 cp_mqd_control;
+};
+
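+/* Memory Queue Descriptor (MQD): in-memory shadow of a compute queue's state,
+ * including its HQD register values
+ */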
+struct bonaire_mqd {
+	u32 header;
+	u32 dispatch_initiator;
+	u32 dimensions[3];
+	u32 start_idx[3];
+	u32 num_threads[3];
+	u32 pipeline_stat_enable;
+	u32 perf_counter_enable;
+	u32 pgm[2];
+	u32 tba[2];
+	u32 tma[2];
+	u32 pgm_rsrc[2];
+	u32 vmid;
+	u32 resource_limits;
+	u32 static_thread_mgmt01[2];
+	u32 tmp_ring_size;
+	u32 static_thread_mgmt23[2];
+	u32 restart[3];
+	u32 thread_trace_enable;
+	u32 reserved1;
+	u32 user_data[16];
+	u32 vgtcs_invoke_count[2];
+	struct hqd_registers queue_state;
+	u32 dequeue_cntr;
+	u32 interrupt_queue[64];
+};
+
+/**
+ * cik_cp_compute_resume - setup the compute queue registers
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Program the compute queues and test them to make sure they
+ * are working.
+ * Returns 0 for success, error for failure.
+ */
+static int cik_cp_compute_resume(struct radeon_device *rdev)
+{
+	int r, i, j, idx;
+	u32 tmp;
+	bool use_doorbell = true;
+	u64 hqd_gpu_addr;
+	u64 mqd_gpu_addr;
+	u64 eop_gpu_addr;
+	u64 wb_gpu_addr;
+	u32 *buf;
+	struct bonaire_mqd *mqd;
+
+	r = cik_cp_compute_start(rdev);
+	if (r)
+		return r;
+
+	/* fix up chicken bits */
+	tmp = RREG32(CP_CPF_DEBUG);
+	tmp |= (1 << 23);
+	WREG32(CP_CPF_DEBUG, tmp);
+
+	/* init the pipes */
+	mutex_lock(&rdev->srbm_mutex);
+
+	for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); ++i) {
+		int me = (i < 4) ? 1 : 2;
+		int pipe = (i < 4) ? i : (i - 4);
+
+		cik_srbm_select(rdev, me, pipe, 0, 0);
+
+		eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+		/* write the EOP addr */
+		WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+		WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+
+		/* set the VMID assigned */
+		WREG32(CP_HPD_EOP_VMID, 0);
+
+		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
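+		/* e.g. MEC_HPD_SIZE = 2048: order_base_2(2048 / 8) = 8, i.e. 2^9 = 512 dwords */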
+		tmp = RREG32(CP_HPD_EOP_CONTROL);
+		tmp &= ~EOP_SIZE_MASK;
+		tmp |= order_base_2(MEC_HPD_SIZE / 8);
+		WREG32(CP_HPD_EOP_CONTROL, tmp);
+	}
+	cik_srbm_select(rdev, 0, 0, 0, 0);
+	mutex_unlock(&rdev->srbm_mutex);
+
+	/* init the queues.  Just two for now. */
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			idx = CAYMAN_RING_TYPE_CP1_INDEX;
+		else
+			idx = CAYMAN_RING_TYPE_CP2_INDEX;
+
+		if (rdev->ring[idx].mqd_obj == NULL) {
+			r = radeon_bo_create(rdev,
+					     sizeof(struct bonaire_mqd),
+					     PAGE_SIZE, true,
+					     RADEON_GEM_DOMAIN_GTT, 0, NULL,
+					     NULL, &rdev->ring[idx].mqd_obj);
+			if (r) {
+				dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
+				return r;
+			}
+		}
+
+		r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
+		if (unlikely(r != 0)) {
+			cik_cp_compute_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
+				  &mqd_gpu_addr);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
+			cik_cp_compute_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
+			cik_cp_compute_fini(rdev);
+			return r;
+		}
+
+		/* init the mqd struct */
+		memset(buf, 0, sizeof(struct bonaire_mqd));
+
+		mqd = (struct bonaire_mqd *)buf;
+		mqd->header = 0xC0310800;
+		mqd->static_thread_mgmt01[0] = 0xffffffff;
+		mqd->static_thread_mgmt01[1] = 0xffffffff;
+		mqd->static_thread_mgmt23[0] = 0xffffffff;
+		mqd->static_thread_mgmt23[1] = 0xffffffff;
+
+		mutex_lock(&rdev->srbm_mutex);
+		cik_srbm_select(rdev, rdev->ring[idx].me,
+				rdev->ring[idx].pipe,
+				rdev->ring[idx].queue, 0);
+
+		/* disable wptr polling */
+		tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+		tmp &= ~WPTR_POLL_EN;
+		WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+
+		/* enable doorbell? */
+		mqd->queue_state.cp_hqd_pq_doorbell_control =
+			RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
+		if (use_doorbell)
+			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
+		else
+			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
+		WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
+		       mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+		/* disable the queue if it's active */
+		mqd->queue_state.cp_hqd_dequeue_request = 0;
+		mqd->queue_state.cp_hqd_pq_rptr = 0;
+		mqd->queue_state.cp_hqd_pq_wptr = 0;
+		if (RREG32(CP_HQD_ACTIVE) & 1) {
+			WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (!(RREG32(CP_HQD_ACTIVE) & 1))
+					break;
+				udelay(1);
+			}
+			WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
+			WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
+			WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+		}
+
+		/* set the pointer to the MQD */
+		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
+		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+		WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
+		WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
+		/* set MQD vmid to 0 */
+		mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
+		mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
+		WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
+
+		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+		hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
+		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
+		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+		WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
+		WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
+
+		/* set up the HQD, this is similar to CP_RB0_CNTL */
+		mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
+		mqd->queue_state.cp_hqd_pq_control &=
+			~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
+
+		mqd->queue_state.cp_hqd_pq_control |=
+			order_base_2(rdev->ring[idx].ring_size / 8);
+		mqd->queue_state.cp_hqd_pq_control |=
+			(order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
+#ifdef __BIG_ENDIAN
+		mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+#endif
+		mqd->queue_state.cp_hqd_pq_control &=
+			~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
+		mqd->queue_state.cp_hqd_pq_control |=
+			PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
+		WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
+
+		/* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
+		if (i == 0)
+			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
+		else
+			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
+		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+		WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
+		WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
+		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
+
+		/* set the wb address whether it's enabled or not */
+		if (i == 0)
+			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
+		else
+			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
+		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
+		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
+			upper_32_bits(wb_gpu_addr) & 0xffff;
+		WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
+		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
+		WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
+
+		/* enable the doorbell if requested */
+		if (use_doorbell) {
+			mqd->queue_state.cp_hqd_pq_doorbell_control =
+				RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
+			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
+			mqd->queue_state.cp_hqd_pq_doorbell_control |=
+				DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
+			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
+			mqd->queue_state.cp_hqd_pq_doorbell_control &=
+				~(DOORBELL_SOURCE | DOORBELL_HIT);
+		} else {
+			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
+		}
+		WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
+		       mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+		rdev->ring[idx].wptr = 0;
+		mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
+		WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+		mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
+
+		/* set the vmid for the queue */
+		mqd->queue_state.cp_hqd_vmid = 0;
+		WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
+
+		/* activate the queue */
+		mqd->queue_state.cp_hqd_active = 1;
+		WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
+
+		cik_srbm_select(rdev, 0, 0, 0, 0);
+		mutex_unlock(&rdev->srbm_mutex);
+
+		radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
+		radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
+
+		rdev->ring[idx].ready = true;
+		r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
+		if (r)
+			rdev->ring[idx].ready = false;
+	}
+
+	return 0;
+}
+
+static void cik_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	cik_cp_gfx_enable(rdev, enable);
+	cik_cp_compute_enable(rdev, enable);
+}
+
+static int cik_cp_load_microcode(struct radeon_device *rdev)
+{
+	int r;
+
+	r = cik_cp_gfx_load_microcode(rdev);
+	if (r)
+		return r;
+	r = cik_cp_compute_load_microcode(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void cik_cp_fini(struct radeon_device *rdev)
+{
+	cik_cp_gfx_fini(rdev);
+	cik_cp_compute_fini(rdev);
+}
+
+static int cik_cp_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	cik_enable_gui_idle_interrupt(rdev, false);
+
+	r = cik_cp_load_microcode(rdev);
+	if (r)
+		return r;
+
+	r = cik_cp_gfx_resume(rdev);
+	if (r)
+		return r;
+	r = cik_cp_compute_resume(rdev);
+	if (r)
+		return r;
+
+	cik_enable_gui_idle_interrupt(rdev, true);
+
+	return 0;
+}
+
+static void cik_print_gpu_status_regs(struct radeon_device *rdev)
+{
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
+		RREG32(GRBM_STATUS2));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
+		RREG32(GRBM_STATUS_SE2));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
+		RREG32(GRBM_STATUS_SE3));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
+		RREG32(SRBM_STATUS2));
+	dev_info(rdev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
+		RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
+	dev_info(rdev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
+		 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+	dev_info(rdev->dev, "  CP_STAT = 0x%08x\n", RREG32(CP_STAT));
+	dev_info(rdev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
+		 RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
+		 RREG32(CP_STALLED_STAT3));
+	dev_info(rdev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
+		 RREG32(CP_CPF_BUSY_STAT));
+	dev_info(rdev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(CP_CPF_STALLED_STAT1));
+	dev_info(rdev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
+	dev_info(rdev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
+	dev_info(rdev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(CP_CPC_STALLED_STAT1));
+	dev_info(rdev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
+}
+
+/**
+ * cik_gpu_check_soft_reset - check which blocks are busy
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check which blocks are busy and return the relevant reset
+ * mask to be used by cik_gpu_soft_reset().
+ * Returns a mask of the blocks to be reset.
+ */
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   BCI_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	/* GRBM_STATUS2 */
+	tmp = RREG32(GRBM_STATUS2);
+	if (tmp & RLC_BUSY)
+		reset_mask |= RADEON_RESET_RLC;
+
+	/* SDMA0_STATUS_REG */
+	tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
+	if (!(tmp & SDMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SDMA1_STATUS_REG */
+	tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
+	if (!(tmp & SDMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & SDMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	if (tmp & SDMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+/**
+ * cik_gpu_soft_reset - soft reset GPU
+ *
+ * @rdev: radeon_device pointer
+ * @reset_mask: mask of which blocks to reset
+ *
+ * Soft reset the blocks specified in @reset_mask.
+ */
+static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	cik_print_gpu_status_regs(rdev);
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	/* disable CG/PG */
+	cik_fini_pg(rdev);
+	cik_fini_cg(rdev);
+
+	/* stop the rlc */
+	cik_rlc_stop(rdev);
+
+	/* Disable GFX parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	/* Disable MEC parsing/prefetching */
+	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* sdma0 */
+		tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+		tmp |= SDMA_HALT;
+		WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	}
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* sdma1 */
+		tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+		tmp |= SDMA_HALT;
+		WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	}
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
+		grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_SDMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_SDMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		grbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	cik_print_gpu_status_regs(rdev);
+}
+
+struct kv_reset_save_regs {
+	u32 gmcon_reng_execute;
+	u32 gmcon_misc;
+	u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+				   struct kv_reset_save_regs *save)
+{
+	save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+	save->gmcon_misc = RREG32(GMCON_MISC);
+	save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+	WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+	WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+						STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+				      struct kv_reset_save_regs *save)
+{
+	int i;
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x210000);
+	WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x21003);
+	WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+	WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x420000);
+	WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x120202);
+	WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+	WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+	WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+	WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+	WREG32(GMCON_MISC3, save->gmcon_misc3);
+	WREG32(GMCON_MISC, save->gmcon_misc);
+	WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	struct kv_reset_save_regs kv_save = { 0 };
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* disable cg/pg */
+	cik_fini_pg(rdev);
+	cik_fini_cg(rdev);
+
+	/* Disable GFX parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	/* Disable MEC parsing/prefetching */
+	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+	/* sdma0 */
+	tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+	tmp |= SDMA_HALT;
+	WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	/* sdma1 */
+	tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+	tmp |= SDMA_HALT;
+	WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc, disable cp internal ints */
+	cik_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (rdev->flags & RADEON_IS_IGP)
+		kv_save_regs_for_reset(rdev, &kv_save);
+
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* reset */
+	radeon_pci_config_reset(rdev);
+
+	udelay(100);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+
+	/* does asic init need to be run first??? */
+	if (rdev->flags & RADEON_IS_IGP)
+		kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @rdev: radeon_device pointer
+ * @hard: force hard reset
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+int cik_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	u32 reset_mask;
+
+	if (hard) {
+		cik_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
+	reset_mask = cik_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	/* try soft reset */
+	cik_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = cik_gpu_check_soft_reset(rdev);
+
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		cik_gpu_pci_config_reset(rdev);
+
+	reset_mask = cik_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * cik_gfx_is_lockup - check if the 3D engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the 3D engine is locked up (CIK).
+ * Returns true if the engine is locked, false if not.
+ */
+bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* MC */
+/**
+ * cik_mc_program - program the GPU memory controller
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set the location of vram, gart, and AGP in the GPU's
+ * physical address space (CIK).
+ */
+static void cik_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lockout access through VGA aperture*/
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       rdev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       rdev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       rdev->vram_scratch.gpu_addr >> 12);
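+	/* FB_LOCATION packs the MC address range in 16 MiB (1 << 24) units,
+	 * top of range in bits 31:16 and base in bits 15:0; e.g. a 1 GiB
+	 * aperture at MC address 0 encodes as (0x3F << 16) | 0x0000
+	 * (illustrative example) */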
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/**
+ * cik_mc_init - initialize the memory controller driver params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the amount of vram, vram width, and decide how to place
+ * vram and gart within the GPU's physical address space (CIK).
+ * Returns 0 for success.
+ */
+static int cik_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
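+	/* effective bus width is channels x channel size, e.g. four 64-bit
+	 * channels give a 256-bit interface (illustrative) */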
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* size in MB on CIK */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	si_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/*
+ * GART
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the radeon vm/hsa code.
+ */
+/**
+ * cik_pcie_gart_tlb_flush - gart tlb flush callback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Flush the TLB for the VMID 0 page table (CIK).
+ */
+void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+	/* bits 0-15 are the VM contexts 0-15; only flush context 0 here */
+	WREG32(VM_INVALIDATE_REQUEST, 0x1);
+}
+
+/**
+ * cik_pcie_gart_enable - gart enable
+ *
+ * @rdev: radeon_device pointer
+ *
+ * This sets up the TLBs, programs the page tables for VMID0,
+ * sets up the hw for VMIDs 1-15 which are allocated on
+ * demand, and sets up the global locations for the LDS, GDS,
+ * and GPUVM for FSA64 clients (CIK).
+ * Returns 0 for success, errors for failure.
+ */
+static int cik_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       BANK_SELECT(4) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
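+	/* out-of-range VMID0 accesses are serviced from this dummy page
+	 * (RANGE_PROTECTION_FAULT_ENABLE_DEFAULT below; assumed semantics) */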
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* restore context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
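+	/* the per-VMID page table base registers sit in two separate banks
+	 * of contiguous dwords (VMIDs 0-7 and 8-15), hence the split below */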
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			       rdev->vm_manager.saved_table_addr[i]);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
+			       rdev->vm_manager.saved_table_addr[i]);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	if (rdev->family == CHIP_KAVERI) {
+		u32 tmp = RREG32(CHUB_CONTROL);
+		tmp &= ~BYPASS_VM;
+		WREG32(CHUB_CONTROL, tmp);
+	}
+
+	/* XXX SH_MEM regs */
+	/* where to put LDS, scratch, GPUVM in FSA64 space */
+	mutex_lock(&rdev->srbm_mutex);
+	for (i = 0; i < 16; i++) {
+		cik_srbm_select(rdev, 0, 0, 0, i);
+		/* CP and shaders */
+		WREG32(SH_MEM_CONFIG, SH_MEM_CONFIG_GFX_DEFAULT);
+		WREG32(SH_MEM_APE1_BASE, 1);
+		WREG32(SH_MEM_APE1_LIMIT, 0);
+		WREG32(SH_MEM_BASES, 0);
+		/* SDMA GFX */
+		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
+		WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
+		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
+		WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
+		/* XXX SDMA RLC - todo */
+	}
+	cik_srbm_select(rdev, 0, 0, 0, 0);
+	mutex_unlock(&rdev->srbm_mutex);
+
+	cik_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+/**
+ * cik_pcie_gart_disable - gart disable
+ *
+ * @rdev: radeon_device pointer
+ *
+ * This disables all VM page tables (CIK).
+ */
+static void cik_pcie_gart_disable(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL,
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+/**
+ * cik_pcie_gart_fini - vm fini callback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tears down the driver GART/VM setup (CIK).
+ */
+static void cik_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cik_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+/* vm parser */
+/**
+ * cik_ib_parse - vm ib_parse callback
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer pointer
+ *
+ * CIK uses hw IB checking so this is a nop (CIK).
+ */
+int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	return 0;
+}
+
+/*
+ * vm
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the radeon vm/hsa code.
+ */
+/**
+ * cik_vm_init - cik vm init callback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Inits cik specific vm parameters (number of VMs, base of vram for
+ * VMIDs 1-15) (CIK).
+ * Returns 0 for success.
+ */
+int cik_vm_init(struct radeon_device *rdev)
+{
+	/*
+	 * number of VMs
+	 * VMID 0 is reserved for System
+	 * radeon graphics/compute will use VMIDs 1-15
+	 */
+	rdev->vm_manager.nvm = 16;
+	/* base offset of vram pages */
+	if (rdev->flags & RADEON_IS_IGP) {
+		u64 tmp = RREG32(MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		rdev->vm_manager.vram_base_offset = tmp;
+	} else {
+		rdev->vm_manager.vram_base_offset = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * cik_vm_fini - cik vm fini callback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down any asic specific VM setup (CIK).
+ */
+void cik_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * cik_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
+ *
+ * Print human readable fault information (CIK).
+ */
+static void cik_vm_decode_fault(struct radeon_device *rdev,
+				u32 status, u32 addr, u32 mc_client)
+{
+	u32 mc_id;
+	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+	if (rdev->family == CHIP_HAWAII)
+		mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+	else
+		mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, addr,
+	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+	       block, mc_client, mc_id);
+}
+
+/**
+ * cik_vm_flush - cik vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @vm_id: VMID to flush
+ * @pd_addr: page directory base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (CIK).
+ */
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		  unsigned vm_id, uint64_t pd_addr)
+{
+	int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
+
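+	/* each WRITE_DATA packet below is: header, control word (engine and
+	 * destination select), destination dword offset, upper address
+	 * (0 for register writes), then the payload dword(s) */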
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	if (vm_id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* update SH_MEM_* regs */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, VMID(vm_id));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, SH_MEM_BASES >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, 0); /* SH_MEM_BASES */
+	radeon_ring_write(ring, SH_MEM_CONFIG_GFX_DEFAULT); /* SH_MEM_CONFIG */
+	radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
+	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
+
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, VMID(0));
+
+	/* HDP flush */
+	cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
+	/* compute doesn't have PFP */
+	if (usepfp) {
+		/* sync PFP to ME, otherwise we might get invalid PFP reads */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		radeon_ring_write(ring, 0x0);
+	}
+}
+
+/*
+ * RLC
+ * The RLC is a multi-purpose microengine that handles a
+ * variety of functions, the most important of which is
+ * the interrupt controller.
+ */
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					  bool enable)
+{
+	u32 tmp = RREG32(CP_INT_CNTL_RING0);
+
+	if (enable)
+		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	else
+		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+}
+
+static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_LB_CNTL);
+	if (enable)
+		tmp |= LOAD_BALANCE_ENABLE;
+	else
+		tmp &= ~LOAD_BALANCE_ENABLE;
+	WREG32(RLC_LB_CNTL, tmp);
+}
+
+static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
+{
+	u32 i, j, k;
+	u32 mask;
+
+	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+			cik_select_se_sh(rdev, i, j);
+			for (k = 0; k < rdev->usec_timeout; k++) {
+				if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
+	for (k = 0; k < rdev->usec_timeout; k++) {
+		if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_CNTL);
+	if (tmp != rlc)
+		WREG32(RLC_CNTL, rlc);
+}
+
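+/* Halt the RLC, waiting for the GPM to go idle and the serdes to settle,
+ * and return the previous RLC_CNTL value so the caller can restore it
+ * later via cik_update_rlc().
+ */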
+static u32 cik_halt_rlc(struct radeon_device *rdev)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_CNTL);
+
+	if (data & RLC_ENABLE) {
+		u32 i;
+
+		data &= ~RLC_ENABLE;
+		WREG32(RLC_CNTL, data);
+
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
+				break;
+			udelay(1);
+		}
+
+		cik_wait_for_rlc_serdes(rdev);
+	}
+
+	return orig;
+}
+
+void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
+{
+	u32 tmp, i, mask;
+
+	tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
+	WREG32(RLC_GPR_REG2, tmp);
+
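+	/* wait until the RLC reports both gfx power and clock status */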
+	mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(RLC_GPM_STAT) & mask) == mask)
+			break;
+		udelay(1);
+	}
+
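+	/* then wait for the RLC to clear the REQ bit, acking the request */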
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
+	WREG32(RLC_GPR_REG2, tmp);
+}
+
+/**
+ * cik_rlc_stop - stop the RLC ME
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Halt the RLC ME (MicroEngine) (CIK).
+ */
+static void cik_rlc_stop(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, 0);
+
+	cik_enable_gui_idle_interrupt(rdev, false);
+
+	cik_wait_for_rlc_serdes(rdev);
+}
+
+/**
+ * cik_rlc_start - start the RLC ME
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unhalt the RLC ME (MicroEngine) (CIK).
+ */
+static void cik_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+
+	cik_enable_gui_idle_interrupt(rdev, true);
+
+	udelay(50);
+}
+
+/**
+ * cik_rlc_resume - setup the RLC hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the RLC registers, load the ucode,
+ * and start the RLC (CIK).
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i, size, tmp;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	cik_rlc_stop(rdev);
+
+	/* disable CG */
+	tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
+	WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+
+	si_rlc_reset(rdev);
+
+	cik_init_pg(rdev);
+
+	cik_init_cg(rdev);
+
+	WREG32(RLC_LB_CNTR_INIT, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0x00008000);
+
+	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+	WREG32(RLC_LB_PARAMS, 0x00600408);
+	WREG32(RLC_LB_CNTL, 0x80000004);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	if (rdev->new_fw) {
+		const struct rlc_firmware_header_v1_0 *hdr =
+			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+		const __le32 *fw_data = (const __le32 *)
+			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		radeon_ucode_print_rlc_hdr(&hdr->header);
+
+		size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		WREG32(RLC_GPM_UCODE_ADDR, 0);
+		for (i = 0; i < size; i++)
+			WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
+	} else {
+		const __be32 *fw_data;
+
+		switch (rdev->family) {
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		default:
+			size = BONAIRE_RLC_UCODE_SIZE;
+			break;
+		case CHIP_KAVERI:
+			size = KV_RLC_UCODE_SIZE;
+			break;
+		case CHIP_KABINI:
+			size = KB_RLC_UCODE_SIZE;
+			break;
+		case CHIP_MULLINS:
+			size = ML_RLC_UCODE_SIZE;
+			break;
+		}
+
+		fw_data = (const __be32 *)rdev->rlc_fw->data;
+		WREG32(RLC_GPM_UCODE_ADDR, 0);
+		for (i = 0; i < size; i++)
+			WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(RLC_GPM_UCODE_ADDR, 0);
+	}
+
+	/* XXX - find out what chips support lbpw */
+	cik_enable_lbpw(rdev, false);
+
+	if (rdev->family == CHIP_BONAIRE)
+		WREG32(RLC_DRIVER_DMA_STATUS, 0);
+
+	cik_rlc_start(rdev);
+
+	return 0;
+}
+
+static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
+{
+	u32 data, orig, tmp, tmp2;
+
+	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+		cik_enable_gui_idle_interrupt(rdev, true);
+
+		tmp = cik_halt_rlc(rdev);
+
+		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+		tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
+		WREG32(RLC_SERDES_WR_CTRL, tmp2);
+
+		cik_update_rlc(rdev, tmp);
+
+		data |= CGCG_EN | CGLS_EN;
+	} else {
+		cik_enable_gui_idle_interrupt(rdev, false);
+
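+		/* dummy reads, presumably to give the clock change time to
+		 * take effect */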
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+
+		data &= ~(CGCG_EN | CGLS_EN);
+	}
+
+	if (orig != data)
+		WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+	u32 data, orig, tmp = 0;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
+		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
+			if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+				orig = data = RREG32(CP_MEM_SLP_CNTL);
+				data |= CP_MEM_LS_EN;
+				if (orig != data)
+					WREG32(CP_MEM_SLP_CNTL, data);
+			}
+		}
+
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
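+		/* set bit 0 and clear bit 1 of the MGCG override */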
+		data |= 0x00000001;
+		data &= 0xfffffffd;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		tmp = cik_halt_rlc(rdev);
+
+		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+		data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
+		WREG32(RLC_SERDES_WR_CTRL, data);
+
+		cik_update_rlc(rdev, tmp);
+
+		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
+			orig = data = RREG32(CGTS_SM_CTRL_REG);
+			data &= ~SM_MODE_MASK;
+			data |= SM_MODE(0x2);
+			data |= SM_MODE_ENABLE;
+			data &= ~CGTS_OVERRIDE;
+			if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
+			    (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
+				data &= ~CGTS_LS_OVERRIDE;
+			data &= ~ON_MONITOR_ADD_MASK;
+			data |= ON_MONITOR_ADD_EN;
+			data |= ON_MONITOR_ADD(0x96);
+			if (orig != data)
+				WREG32(CGTS_SM_CTRL_REG, data);
+		}
+	} else {
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data |= 0x00000003;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		data = RREG32(RLC_MEM_SLP_CNTL);
+		if (data & RLC_MEM_LS_EN) {
+			data &= ~RLC_MEM_LS_EN;
+			WREG32(RLC_MEM_SLP_CNTL, data);
+		}
+
+		data = RREG32(CP_MEM_SLP_CNTL);
+		if (data & CP_MEM_LS_EN) {
+			data &= ~CP_MEM_LS_EN;
+			WREG32(CP_MEM_SLP_CNTL, data);
+		}
+
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		tmp = cik_halt_rlc(rdev);
+
+		cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+		WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+		data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
+		WREG32(RLC_SERDES_WR_CTRL, data);
+
+		cik_update_rlc(rdev, tmp);
+	}
+}
+
+static const u32 mc_cg_registers[] = {
+	MC_HUB_MISC_HUB_CG,
+	MC_HUB_MISC_SIP_CG,
+	MC_HUB_MISC_VM_CG,
+	MC_XPB_CLK_GAT,
+	ATC_MISC_CG,
+	MC_CITF_MISC_WR_CG,
+	MC_CITF_MISC_RD_CG,
+	MC_CITF_MISC_VM_CG,
+	VM_L2_CG,
+};
+
+static void cik_enable_mc_ls(struct radeon_device *rdev,
+			     bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
+			data |= MC_LS_ENABLE;
+		else
+			data &= ~MC_LS_ENABLE;
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void cik_enable_mc_mgcg(struct radeon_device *rdev,
+			       bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+			data |= MC_CG_ENABLE;
+		else
+			data &= ~MC_CG_ENABLE;
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
+				 bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+		WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+		WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+	} else {
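+		/* setting the soft-override bits (31:24) forces the SDMA
+		 * clocks on, i.e. disables clock gating (bit layout assumed) */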
+		orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+		data |= 0xff000000;
+		if (data != orig)
+			WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+		data |= 0xff000000;
+		if (data != orig)
+			WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+	}
+}
+
+static void cik_enable_sdma_mgls(struct radeon_device *rdev,
+				 bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
+		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+		data |= 0x100;
+		if (orig != data)
+			WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+		data |= 0x100;
+		if (orig != data)
+			WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+	} else {
+		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+		data &= ~0x100;
+		if (orig != data)
+			WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+		data &= ~0x100;
+		if (orig != data)
+			WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+	}
+}
+
+static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
+				bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
+		data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+		data |= 0xfff;
+		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+		orig = data = RREG32(UVD_CGC_CTRL);
+		data |= DCM;
+		if (orig != data)
+			WREG32(UVD_CGC_CTRL, data);
+	} else {
+		data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+		data &= ~0xfff;
+		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+		orig = data = RREG32(UVD_CGC_CTRL);
+		data &= ~DCM;
+		if (orig != data)
+			WREG32(UVD_CGC_CTRL, data);
+	}
+}
+
+static void cik_enable_bif_mgls(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+	else
+		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_CNTL2, data);
+}
+
+static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
+				bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+		data &= ~CLOCK_GATING_DIS;
+	else
+		data |= CLOCK_GATING_DIS;
+
+	if (orig != data)
+		WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void cik_enable_hdp_ls(struct radeon_device *rdev,
+			      bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_MEM_POWER_LS);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+		data |= HDP_LS_ENABLE;
+	else
+		data &= ~HDP_LS_ENABLE;
+
+	if (orig != data)
+		WREG32(HDP_MEM_POWER_LS, data);
+}
+
+void cik_update_cg(struct radeon_device *rdev,
+		   u32 block, bool enable)
+{
+	if (block & RADEON_CG_BLOCK_GFX) {
+		cik_enable_gui_idle_interrupt(rdev, false);
+		/* order matters! */
+		if (enable) {
+			cik_enable_mgcg(rdev, true);
+			cik_enable_cgcg(rdev, true);
+		} else {
+			cik_enable_cgcg(rdev, false);
+			cik_enable_mgcg(rdev, false);
+		}
+		cik_enable_gui_idle_interrupt(rdev, true);
+	}
+
+	if (block & RADEON_CG_BLOCK_MC) {
+		if (!(rdev->flags & RADEON_IS_IGP)) {
+			cik_enable_mc_mgcg(rdev, enable);
+			cik_enable_mc_ls(rdev, enable);
+		}
+	}
+
+	if (block & RADEON_CG_BLOCK_SDMA) {
+		cik_enable_sdma_mgcg(rdev, enable);
+		cik_enable_sdma_mgls(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_BIF) {
+		cik_enable_bif_mgls(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_UVD) {
+		if (rdev->has_uvd)
+			cik_enable_uvd_mgcg(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_HDP) {
+		cik_enable_hdp_mgcg(rdev, enable);
+		cik_enable_hdp_ls(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_VCE) {
+		vce_v2_0_enable_mgcg(rdev, enable);
+	}
+}
+
+static void cik_init_cg(struct radeon_device *rdev)
+{
+	cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
+
+	if (rdev->has_uvd)
+		si_init_uvd_internal_cg(rdev);
+
+	cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+			     RADEON_CG_BLOCK_SDMA |
+			     RADEON_CG_BLOCK_BIF |
+			     RADEON_CG_BLOCK_UVD |
+			     RADEON_CG_BLOCK_HDP), true);
+}
+
+static void cik_fini_cg(struct radeon_device *rdev)
+{
+	cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+			     RADEON_CG_BLOCK_SDMA |
+			     RADEON_CG_BLOCK_BIF |
+			     RADEON_CG_BLOCK_UVD |
+			     RADEON_CG_BLOCK_HDP), false);
+
+	cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
+}
+
+static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
+					  bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+		data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+	else
+		data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
+					  bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+		data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+	else
+		data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
+		data &= ~DISABLE_CP_PG;
+	else
+		data |= DISABLE_CP_PG;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
+		data &= ~DISABLE_GDS_PG;
+	else
+		data |= DISABLE_GDS_PG;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+#define CP_ME_TABLE_SIZE    96
+#define CP_ME_TABLE_OFFSET  2048
+#define CP_MEC_TABLE_OFFSET 4096
+
+void cik_init_cp_pg_table(struct radeon_device *rdev)
+{
+	volatile u32 *dst_ptr;
+	int me, i, max_me = 4;
+	u32 bo_offset = 0;
+	u32 table_offset, table_size;
+
+	if (rdev->family == CHIP_KAVERI)
+		max_me = 5;
+
+	if (rdev->rlc.cp_table_ptr == NULL)
+		return;
+
+	/* write the cp table buffer */
+	dst_ptr = rdev->rlc.cp_table_ptr;
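+	/* jump table entries are written in fixed order: CE, PFP, ME, MEC,
+	 * plus MEC2 on Kaveri */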
+	for (me = 0; me < max_me; me++) {
+		if (rdev->new_fw) {
+			const __le32 *fw_data;
+			const struct gfx_firmware_header_v1_0 *hdr;
+
+			if (me == 0) {
+				hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+				fw_data = (const __le32 *)
+					(rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+				table_offset = le32_to_cpu(hdr->jt_offset);
+				table_size = le32_to_cpu(hdr->jt_size);
+			} else if (me == 1) {
+				hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+				fw_data = (const __le32 *)
+					(rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+				table_offset = le32_to_cpu(hdr->jt_offset);
+				table_size = le32_to_cpu(hdr->jt_size);
+			} else if (me == 2) {
+				hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+				fw_data = (const __le32 *)
+					(rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+				table_offset = le32_to_cpu(hdr->jt_offset);
+				table_size = le32_to_cpu(hdr->jt_size);
+			} else if (me == 3) {
+				hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+				fw_data = (const __le32 *)
+					(rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+				table_offset = le32_to_cpu(hdr->jt_offset);
+				table_size = le32_to_cpu(hdr->jt_size);
+			} else {
+				hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+				fw_data = (const __le32 *)
+					(rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+				table_offset = le32_to_cpu(hdr->jt_offset);
+				table_size = le32_to_cpu(hdr->jt_size);
+			}
+
+			for (i = 0; i < table_size; i++) {
+				dst_ptr[bo_offset + i] =
+					cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+			}
+			bo_offset += table_size;
+		} else {
+			const __be32 *fw_data;
+			table_size = CP_ME_TABLE_SIZE;
+
+			if (me == 0) {
+				fw_data = (const __be32 *)rdev->ce_fw->data;
+				table_offset = CP_ME_TABLE_OFFSET;
+			} else if (me == 1) {
+				fw_data = (const __be32 *)rdev->pfp_fw->data;
+				table_offset = CP_ME_TABLE_OFFSET;
+			} else if (me == 2) {
+				fw_data = (const __be32 *)rdev->me_fw->data;
+				table_offset = CP_ME_TABLE_OFFSET;
+			} else {
+				fw_data = (const __be32 *)rdev->mec_fw->data;
+				table_offset = CP_MEC_TABLE_OFFSET;
+			}
+
+			for (i = 0; i < table_size; i++) {
+				dst_ptr[bo_offset + i] =
+					cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+			}
+			bo_offset += table_size;
+		}
+	}
+}
+
+static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
+				bool enable)
+{
+	u32 data, orig;
+
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
+		orig = data = RREG32(RLC_PG_CNTL);
+		data |= GFX_PG_ENABLE;
+		if (orig != data)
+			WREG32(RLC_PG_CNTL, data);
+
+		orig = data = RREG32(RLC_AUTO_PG_CTRL);
+		data |= AUTO_PG_EN;
+		if (orig != data)
+			WREG32(RLC_AUTO_PG_CTRL, data);
+	} else {
+		orig = data = RREG32(RLC_PG_CNTL);
+		data &= ~GFX_PG_ENABLE;
+		if (orig != data)
+			WREG32(RLC_PG_CNTL, data);
+
+		orig = data = RREG32(RLC_AUTO_PG_CTRL);
+		data &= ~AUTO_PG_EN;
+		if (orig != data)
+			WREG32(RLC_AUTO_PG_CTRL, data);
+
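+		/* dummy read, presumably to flush the register writes above */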
+		data = RREG32(DB_RENDER_CONTROL);
+	}
+}
+
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
+{
+	u32 mask = 0, tmp, tmp1;
+	int i;
+
+	cik_select_se_sh(rdev, se, sh);
+	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	tmp &= 0xffff0000;
+
+	tmp |= tmp1;
+	tmp >>= 16;
+
+	for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+
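+	/* tmp holds the disabled-CU bits; invert and mask to get the active
+	 * CU bitmap, e.g. max_cu_per_sh = 8 gives mask = 0xff */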
+	return (~tmp) & mask;
+}
+
+static void cik_init_ao_cu_mask(struct radeon_device *rdev)
+{
+	u32 i, j, k, active_cu_number = 0;
+	u32 mask, counter, cu_bitmap;
+	u32 tmp = 0;
+
+	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+			mask = 1;
+			cu_bitmap = 0;
+			counter = 0;
+			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
+				if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
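+					/* only the first two active CUs in
+					 * each SH are marked always-on */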
+					if (counter < 2)
+						cu_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+
+			active_cu_number += counter;
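+			/* pack the per-SH always-on bitmap: one byte per SH,
+			 * two SHs (16 bits) per SE */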
+			tmp |= (cu_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+	tmp = RREG32(RLC_MAX_PG_CU);
+	tmp &= ~MAX_PU_CU_MASK;
+	tmp |= MAX_PU_CU(active_cu_number);
+	WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
+				       bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
+		data |= STATIC_PER_CU_PG_ENABLE;
+	else
+		data &= ~STATIC_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
+					bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
+		data |= DYN_PER_CU_PG_ENABLE;
+	else
+		data &= ~DYN_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
+
+static void cik_init_gfx_cgpg(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	u32 i;
+
+	if (rdev->rlc.cs_data) {
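+		/* publish the clear state buffer address (hi/lo) and size to
+		 * the RLC through its scratch window */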
+		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+		WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
+		WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
+		WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
+	} else {
+		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+		for (i = 0; i < 3; i++)
+			WREG32(RLC_GPM_SCRATCH_DATA, 0);
+	}
+	if (rdev->rlc.reg_list) {
+		WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
+		for (i = 0; i < rdev->rlc.reg_list_size; i++)
+			WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
+	}
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	data |= GFX_PG_SRC;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+	WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
+
+	data = RREG32(CP_RB_WPTR_POLL_CNTL);
+	data &= ~IDLE_POLL_COUNT_MASK;
+	data |= IDLE_POLL_COUNT(0x60);
+	WREG32(CP_RB_WPTR_POLL_CNTL, data);
+
+	data = 0x10101010;
+	WREG32(RLC_PG_DELAY, data);
+
+	data = RREG32(RLC_PG_DELAY_2);
+	data &= ~0xff;
+	data |= 0x3;
+	WREG32(RLC_PG_DELAY_2, data);
+
+	data = RREG32(RLC_AUTO_PG_CTRL);
+	data &= ~GRBM_REG_SGIT_MASK;
+	data |= GRBM_REG_SGIT(0x700);
+	WREG32(RLC_AUTO_PG_CTRL, data);
+}
+
+static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
+{
+	cik_enable_gfx_cgpg(rdev, enable);
+	cik_enable_gfx_static_mgpg(rdev, enable);
+	cik_enable_gfx_dynamic_mgpg(rdev, enable);
+}
+
+u32 cik_get_csb_size(struct radeon_device *rdev)
+{
+	u32 count = 0;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (rdev->rlc.cs_data == NULL)
+		return 0;
+
+	/* begin clear state */
+	count += 2;
+	/* context control state */
+	count += 3;
+
+	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT)
+				count += 2 + ext->reg_count;
+			else
+				return 0;
+		}
+	}
+	/* pa_sc_raster_config/pa_sc_raster_config1 */
+	count += 4;
+	/* end clear state */
+	count += 2;
+	/* clear state */
+	count += 2;
+
+	return count;
+}
+
+void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (rdev->rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+	switch (rdev->family) {
+	case CHIP_BONAIRE:
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_KAVERI:
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_HAWAII:
+		buffer[count++] = cpu_to_le32(0x3a00161a);
+		buffer[count++] = cpu_to_le32(0x0000002e);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void cik_init_pg(struct radeon_device *rdev)
+{
+	if (rdev->pg_flags) {
+		cik_enable_sck_slowdown_on_pu(rdev, true);
+		cik_enable_sck_slowdown_on_pd(rdev, true);
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
+			cik_init_gfx_cgpg(rdev);
+			cik_enable_cp_pg(rdev, true);
+			cik_enable_gds_pg(rdev, true);
+		}
+		cik_init_ao_cu_mask(rdev);
+		cik_update_gfx_pg(rdev, true);
+	}
+}
+
+static void cik_fini_pg(struct radeon_device *rdev)
+{
+	if (rdev->pg_flags) {
+		cik_update_gfx_pg(rdev, false);
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
+			cik_enable_cp_pg(rdev, false);
+			cik_enable_gds_pg(rdev, false);
+		}
+	}
+}
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching vectors and processing them until the pointers are
+ * equal again at which point it updates the rptr.
+ */
+
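+/*
+ * A minimal sketch of the consumption model described above, assuming
+ * 16-byte (4 dword) vectors; illustrative only, not the driver's actual
+ * fetch loop:
+ *
+ *	while (rptr != wptr) {
+ *		src_id   = ring[rptr / 4] & 0xff;
+ *		src_data = ring[rptr / 4 + 1] & 0xfffffff;
+ *		process_vector(src_id, src_data);
+ *		rptr = (rptr + 16) & rptr_mask;
+ *	}
+ *	WREG32(IH_RB_RPTR, rptr);
+ */
+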
+/**
+ * cik_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Enable the interrupt ring buffer (CIK).
+ */
+static void cik_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+/**
+ * cik_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disable the interrupt ring buffer (CIK).
+ */
+static void cik_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+/**
+ * cik_disable_interrupt_state - Disable all interrupt sources
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear all interrupt enable bits used by the driver (CIK).
+ */
+static void cik_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* gfx ring */
+	tmp = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+	/* sdma */
+	tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	/* compute queues */
+	WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
+	WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
+	WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
+	WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
+	WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
+	WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
+	WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
+	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
+	/* grbm */
+	WREG32(GRBM_INT_CNTL, 0);
+	/* SRBM */
+	WREG32(SRBM_INT_CNTL, 0);
+	/* vline/vblank, etc. */
+	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+	/* pflip */
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	/* dac hotplug */
+	WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+
+	/* digital hotplug */
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+/**
+ * cik_irq_init - init and enable the interrupt ring
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (CIK).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int cik_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	cik_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = cik_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* XXX this should actually be a bus address, not an MC address. same on older asics */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with MSI, enabled without MSI
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
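+	/* ring size in dwords, log2; e.g. a 64 KiB ring gives
+	 * order_base_2(65536 / 4) = 14 (illustrative) */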
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	/* RPTR_REARM only works if MSIs are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	cik_disable_interrupt_state(rdev);
+
+	pci_set_master(rdev->pdev);
+
+	/* enable irqs */
+	cik_enable_interrupts(rdev);
+
+	return ret;
+}
+
+/**
+ * cik_irq_set - enable/disable interrupt sources
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Enable interrupt sources on the GPU (vblanks, hpd,
+ * etc.) (CIK).
+ * Returns 0 for success, errors for failure.
+ */
+int cik_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl;
+	u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
+	u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+	u32 dma_cntl, dma_cntl1;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		cik_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		cik_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+
+	dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
+	cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+	cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+
+	/* enable CP interrupts on all rings */
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("cik_irq_set: sw int gfx\n");
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+		DRM_DEBUG("cik_irq_set: sw int cp1\n");
+		if (ring->me == 1) {
+			switch (ring->pipe) {
+			case 0:
+				cp_m1p0 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 1:
+				cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 2:
+				cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 3:
+				cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+				break;
+			default:
+				DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+				break;
+			}
+		} else if (ring->me == 2) {
+			switch (ring->pipe) {
+			case 0:
+				cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 1:
+				cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 2:
+				cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 3:
+				cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+				break;
+			default:
+				DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+				break;
+			}
+		} else {
+			DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
+		}
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+		DRM_DEBUG("cik_irq_set: sw int cp2\n");
+		if (ring->me == 1) {
+			switch (ring->pipe) {
+			case 0:
+				cp_m1p0 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 1:
+				cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 2:
+				cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 3:
+				cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+				break;
+			default:
+				DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+				break;
+			}
+		} else if (ring->me == 2) {
+			switch (ring->pipe) {
+			case 0:
+				cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 1:
+				cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 2:
+				cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+				break;
+			case 3:
+				cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+				break;
+			default:
+				DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+				break;
+			}
+		} else {
+			DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
+		}
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("cik_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+		DRM_DEBUG("cik_irq_set: sw int dma1\n");
+		dma_cntl1 |= TRAP_ENABLE;
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("cik_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("cik_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    atomic_read(&rdev->irq.pflip[2])) {
+		DRM_DEBUG("cik_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    atomic_read(&rdev->irq.pflip[3])) {
+		DRM_DEBUG("cik_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    atomic_read(&rdev->irq.pflip[4])) {
+		DRM_DEBUG("cik_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    atomic_read(&rdev->irq.pflip[5])) {
+		DRM_DEBUG("cik_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INTERRUPT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("cik_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("cik_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("cik_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("cik_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("cik_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("cik_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+	}
+
+	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+
+	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
+	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
+
+	WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
+	WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
+	WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
+	WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
+	WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
+	WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
+	WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
+	WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
+	return 0;
+}
+
+/**
+ * cik_irq_ack - ack interrupt sources
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Ack interrupt sources on the GPU (vblanks, hpd,
+ * etc.) (CIK).  Certain interrupt sources are sw
+ * generated and do not require an explicit ack.
+ */
+static inline void cik_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
+
+	rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+		EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+		EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+/**
+ * cik_irq_disable - disable interrupts
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disable interrupts on the hw (CIK).
+ */
+static void cik_irq_disable(struct radeon_device *rdev)
+{
+	cik_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	cik_irq_ack(rdev);
+	cik_disable_interrupt_state(rdev);
+}
+
+/**
+ * cik_irq_suspend - disable interrupts for suspend
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disable interrupts and stop the RLC (CIK).
+ * Used for suspend.
+ */
+static void cik_irq_suspend(struct radeon_device *rdev)
+{
+	cik_irq_disable(rdev);
+	cik_rlc_stop(rdev);
+}
+
+/**
+ * cik_irq_fini - tear down interrupt support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disable interrupts on the hw and free the IH ring
+ * buffer (CIK).
+ * Used for driver unload.
+ */
+static void cik_irq_fini(struct radeon_device *rdev)
+{
+	cik_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+/**
+ * cik_get_ih_wptr - get the IH ring buffer wptr
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (CIK).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by cik_irq_process().
+ * Returns the value of the wptr.
+ */
+static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
+		/* When a ring buffer overflow happens, start parsing
+		 * interrupts from the last vector that was not overwritten
+		 * (wptr + 16, i.e. one 16-byte IV entry past wptr).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/*        CIK IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ *            CP:
+ *            ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
+ *            QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
+ *                     - for gfx, hw shader state (0=PS...5=LS, 6=CS)
+ *            ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
+ *            PIPE_ID - ME0 0=3D
+ *                    - ME1&2 compute dispatcher (4 pipes each)
+ *            SDMA:
+ *            INSTANCE_ID [1:0], QUEUE_ID[1:0]
+ *            INSTANCE_ID - 0 = sdma0, 1 = sdma1
+ *            QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
+ * [79:72]  - VMID
+ * [95:80]  - PASID
+ * [127:96] - reserved
+ */
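+/*
+ * The handlers below decode RINGID (field order still marked XXX in the
+ * code) as:
+ *   CP:   me_id    = ring_id[6:5], pipe_id = ring_id[4:3],
+ *         queue_id = ring_id[2:0]
+ *   SDMA: me_id    = ring_id[1:0] (instance),
+ *         queue_id = ring_id[3:2]
+ */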
+/**
+ * cik_irq_process - interrupt handler
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Interrupt handler (CIK).  Walk the IH ring,
+ * ack interrupts and schedule work to handle
+ * interrupt events.
+ * Returns irq process return code.
+ */
+int cik_irq_process(struct radeon_device *rdev)
+{
+	struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data, ring_id;
+	u8 me_id, pipe_id, queue_id;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_dp = false;
+	bool queue_reset = false;
+	u32 addr, status, mc_client;
+	bool queue_thermal = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = cik_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	cik_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+
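+		/* each IV entry is four dwords; the VMID/PASID fields of
+		 * dword 2 and the reserved dword 3 are not used here
+		 */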
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
+				break;
+			case 1: /* D1 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
+				break;
+			case 1: /* D2 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[2]) {
+					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[2]))
+					radeon_crtc_handle_vblank(rdev, 2);
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D3 vblank\n");
+
+				break;
+			case 1: /* D3 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D3 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[3]) {
+					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[3]))
+					radeon_crtc_handle_vblank(rdev, 3);
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D4 vblank\n");
+
+				break;
+			case 1: /* D4 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D4 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[4]) {
+					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[4]))
+					radeon_crtc_handle_vblank(rdev, 4);
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D5 vblank\n");
+
+				break;
+			case 1: /* D5 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D5 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[5]) {
+					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[5]))
+					radeon_crtc_handle_vblank(rdev, 5);
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D6 vblank\n");
+
+				break;
+			case 1: /* D6 vline */
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D6 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 8: /* D1 page flip */
+		case 10: /* D2 page flip */
+		case 12: /* D3 page flip */
+		case 14: /* D4 page flip */
+		case 16: /* D5 page flip */
+		case 18: /* D6 page flip */
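+			/* src_ids 8,10,12,14,16,18 map to CRTC 0-5 */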
+			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
+
+				break;
+			case 1:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
+
+				break;
+			case 2:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
+
+				break;
+			case 3:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
+
+				break;
+			case 4:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
+
+				break;
+			case 5:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
+				break;
+			case 6:
+				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
+
+				break;
+			case 7:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
+
+				break;
+			case 8:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
+
+				break;
+			case 9:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
+
+				break;
+			case 10:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
+
+				break;
+			case 11:
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 146:
+		case 147:
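+			/* VM protection fault */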
+			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+			mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			if (addr == 0x0 && status == 0x0)
+				break;
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				addr);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				status);
+			cik_vm_decode_fault(rdev, status, addr, mc_client);
+			break;
+		case 167: /* VCE */
+			DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
+			switch (src_data) {
+			case 0:
+				radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 176: /* GFX RB CP_INT */
+		case 177: /* GFX IB CP_INT */
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			/* XXX check the bitfield order! */
+			me_id = (ring_id & 0x60) >> 5;
+			pipe_id = (ring_id & 0x18) >> 3;
+			queue_id = (ring_id & 0x7) >> 0;
+			switch (me_id) {
+			case 0:
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+				break;
+			case 1:
+			case 2:
+				if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+				if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+				break;
+			}
+			break;
+		case 184: /* CP Privileged reg access */
+			DRM_ERROR("Illegal register access in command stream\n");
+			/* XXX check the bitfield order! */
+			me_id = (ring_id & 0x60) >> 5;
+			pipe_id = (ring_id & 0x18) >> 3;
+			queue_id = (ring_id & 0x7) >> 0;
+			switch (me_id) {
+			case 0:
+				/* This results in a full GPU reset, but all we need to do is soft
+				 * reset the CP for gfx
+				 */
+				queue_reset = true;
+				break;
+			case 1:
+				/* XXX compute */
+				queue_reset = true;
+				break;
+			case 2:
+				/* XXX compute */
+				queue_reset = true;
+				break;
+			}
+			break;
+		case 185: /* CP Privileged inst */
+			DRM_ERROR("Illegal instruction in command stream\n");
+			/* XXX check the bitfield order! */
+			me_id = (ring_id & 0x60) >> 5;
+			pipe_id = (ring_id & 0x18) >> 3;
+			queue_id = (ring_id & 0x7) >> 0;
+			switch (me_id) {
+			case 0:
+				/* This results in a full GPU reset, but all we need to do is soft
+				 * reset the CP for gfx
+				 */
+				queue_reset = true;
+				break;
+			case 1:
+				/* XXX compute */
+				queue_reset = true;
+				break;
+			case 2:
+				/* XXX compute */
+				queue_reset = true;
+				break;
+			}
+			break;
+		case 224: /* SDMA trap event */
+			/* XXX check the bitfield order! */
+			me_id = (ring_id & 0x3) >> 0;
+			queue_id = (ring_id & 0xc) >> 2;
+			DRM_DEBUG("IH: SDMA trap\n");
+			switch (me_id) {
+			case 0:
+				switch (queue_id) {
+				case 0:
+					radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+					break;
+				case 1:
+					/* XXX compute */
+					break;
+				case 2:
+					/* XXX compute */
+					break;
+				}
+				break;
+			case 1:
+				switch (queue_id) {
+				case 0:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+					break;
+				case 1:
+					/* XXX compute */
+					break;
+				case 2:
+					/* XXX compute */
+					break;
+				}
+				break;
+			}
+			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 241: /* SDMA Privileged inst */
+		case 247: /* SDMA Privileged inst */
+			DRM_ERROR("Illegal instruction in SDMA command stream\n");
+			/* XXX check the bitfield order! */
+			me_id = (ring_id & 0x3) >> 0;
+			queue_id = (ring_id & 0xc) >> 2;
+			switch (me_id) {
+			case 0:
+				switch (queue_id) {
+				case 0:
+					queue_reset = true;
+					break;
+				case 1:
+					/* XXX compute */
+					queue_reset = true;
+					break;
+				case 2:
+					/* XXX compute */
+					queue_reset = true;
+					break;
+				}
+				break;
+			case 1:
+				switch (queue_id) {
+				case 0:
+					queue_reset = true;
+					break;
+				case 1:
+					/* XXX compute */
+					queue_reset = true;
+					break;
+				case 2:
+					/* XXX compute */
+					queue_reset = true;
+					break;
+				}
+				break;
+			}
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
+	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (queue_reset) {
+		rdev->needs_reset = true;
+		wake_up_all(&rdev->fence_queue);
+	}
+	if (queue_thermal)
+		schedule_work(&rdev->pm.dpm.thermal.work);
+	rdev->ih.rptr = rptr;
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = cik_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * startup/shutdown callbacks
+ */
+static void cik_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * cik_uvd_start() fail early, so nothing happens there.
+		 * Going through that code would be pointless, hence we
+		 * disable UVD here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cik_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = uvd_v4_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD 4.2 resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cik_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void cik_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * cik_vce_start() fail early, so nothing happens there.
+		 * Going through that code would be pointless, hence we
+		 * disable VCE here.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cik_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v2_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cik_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
+/**
+ * cik_startup - program the asic to a functional state
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Programs the asic to a functional state (CIK).
+ * Called by cik_init() and cik_resume().
+ * Returns 0 for success, error for failure.
+ */
+static int cik_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 nop;
+	int r;
+
+	/* enable pcie gen2/3 link */
+	cik_pcie_gen3_enable(rdev);
+	/* enable aspm */
+	cik_program_aspm(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	cik_mc_program(rdev);
+
+	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
+		r = ci_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = cik_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	cik_gpu_init(rdev);
+
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_KAVERI) {
+			rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
+			rdev->rlc.reg_list_size =
+				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
+		} else {
+			rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
+			rdev->rlc.reg_list_size =
+				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
+		}
+	}
+	rdev->rlc.cs_data = ci_cs_data;
+	rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+	rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
+	r = sumo_rlc_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	/* allocate mec buffers */
+	r = cik_mec_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init MEC BOs!\n");
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	cik_uvd_start(rdev);
+	cik_vce_start(rdev);
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = cik_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	cik_irq_set(rdev);
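+
+	/* ring padding: Hawaii with its original firmware needs type-2
+	 * (CP_PACKET2) nops; new firmware and the other CIK parts take
+	 * type-3 NOP packets
+	 */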
+
+	if (rdev->family == CHIP_HAWAII) {
+		if (rdev->new_fw)
+			nop = PACKET3(PACKET3_NOP, 0x3FFF);
+		else
+			nop = RADEON_CP_PACKET2;
+	} else {
+		nop = PACKET3(PACKET3_NOP, 0x3FFF);
+	}
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     nop);
+	if (r)
+		return r;
+
+	/* set up the compute queues */
+	/* type-2 packets are deprecated on MEC, use type-3 instead */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+			     nop);
+	if (r)
+		return r;
+	ring->me = 1; /* first MEC */
+	ring->pipe = 0; /* first pipe */
+	ring->queue = 0; /* first queue */
+	ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
+
+	/* type-2 packets are deprecated on MEC, use type-3 instead */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+			     nop);
+	if (r)
+		return r;
+	/* dGPUs only have 1 MEC */
+	ring->me = 1; /* first MEC */
+	ring->pipe = 0; /* first pipe */
+	ring->queue = 1; /* second queue */
+	ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	if (r)
+		return r;
+
+	r = cik_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cik_sdma_resume(rdev);
+	if (r)
+		return r;
+
+	cik_uvd_resume(rdev);
+	cik_vce_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * cik_resume - resume the asic to a functional state
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Programs the asic to a functional state (CIK).
+ * Called at resume.
+ * Returns 0 for success, error for failure.
+ */
+int cik_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	cik_init_golden_registers(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = cik_startup(rdev);
+	if (r) {
+		DRM_ERROR("cik startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+/**
+ * cik_suspend - suspend the asic
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Bring the chip into a state suitable for suspend (CIK).
+ * Called at suspend.
+ * Returns 0 for success.
+ */
+int cik_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	cik_cp_enable(rdev, false);
+	cik_sdma_enable(rdev, false);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	if (rdev->has_vce)
+		radeon_vce_suspend(rdev);
+	cik_fini_pg(rdev);
+	cik_fini_cg(rdev);
+	cik_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	cik_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does pretty much
+ * nothing more than call asic-specific functions. This should
+ * also allow us to remove a bunch of callbacks like vram_info.
+ */
+/**
+ * cik_init - asic specific driver and hw init
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup asic specific driver variables and program the hw
+ * to a functional state (CIK).
+ * Called at driver startup.
+ * Returns 0 for success, errors for failure.
+ */
+int cik_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	cik_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	cik_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+
+	/* initialize memory controller */
+	r = cik_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+			r = cik_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+		    !rdev->mc_fw) {
+			r = cik_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 256 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 256 * 1024);
+
+	cik_uvd_init(rdev);
+	cik_vce_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = cik_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cik_cp_fini(rdev);
+		cik_sdma_fini(rdev);
+		cik_irq_fini(rdev);
+		sumo_rlc_fini(rdev);
+		cik_mec_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cik_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cik_fini - asic specific driver and hw fini
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the asic specific driver variables and program the hw
+ * to an idle state (CIK).
+ * Called at driver unload.
+ */
+void cik_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	cik_cp_fini(rdev);
+	cik_sdma_fini(rdev);
+	cik_fini_pg(rdev);
+	cik_fini_cg(rdev);
+	cik_irq_fini(rdev);
+	sumo_rlc_fini(rdev);
+	cik_mec_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	radeon_vce_fini(rdev);
+	cik_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
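+	/* the FMT_*_DEPTH(0/1/2) arguments below correspond to 6/8/10 bpc */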
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+		break;
+	case 10:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+		break;
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
+/* display watermark setup */
+/**
+ * dce8_line_buffer_adjust - Set up the line buffer
+ *
+ * @rdev: radeon_device pointer
+ * @radeon_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ *
+ * Setup up the line buffer allocation for
+ * the selected display controller (CIK).
+ * Returns the line buffer size in pixels.
+ */
+static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
+				   struct radeon_crtc *radeon_crtc,
+				   struct drm_display_mode *mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+	/*
+	 * Line Buffer Setup
+	 * There are 6 line buffers, one for each display controller.
+	 * There are 3 partitions per LB. Select the number of partitions
+	 * to enable based on the display width.  For display widths larger
+	 * than 4096, you need to use 2 display controllers and combine
+	 * them using the stereo blender.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (mode->crtc_hdisplay < 1920) {
+			tmp = 1;
+			buffer_alloc = 2;
+		} else if (mode->crtc_hdisplay < 2560) {
+			tmp = 2;
+			buffer_alloc = 2;
+		} else if (mode->crtc_hdisplay < 4096) {
+			tmp = 0;
+			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
+		} else {
+			DRM_DEBUG_KMS("Mode too big for LB!\n");
+			tmp = 0;
+			buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
+		}
+	} else {
+		tmp = 1;
+		buffer_alloc = 0;
+	}
+
+	WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
+	       LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
+
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
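+	/* the returned lb size is in pixels: two lines at the maximum width
+	 * supported by the partition config selected above
+	 */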
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 1:
+			return 1920 * 2;
+		case 2:
+			return 2560 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+/**
+ * cik_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce8_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;   /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce8_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
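+	/* bandwidth [MB/s] = yclk [MHz] * dram_channels * 4 bytes * 0.7 efficiency */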
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce8_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
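+	/* bandwidth [MB/s] = sclk [MHz] * 32 bytes/clk * 0.8 efficiency */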
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce8_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a, b;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(32);
+	b.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce8_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = dce8_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce8_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce8_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce8_available_bandwidth(wm);
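+	/* MB/s is bytes/us, so bytes * 1000 / bandwidth yields ns; the two
+	 * times below cover one 4KB (512 * 8 byte) chunk and one 512-byte
+	 * (128 * 4 byte) cursor line pair
+	 */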
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
+
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+}
+
+/**
+ * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
+{
+	if (dce8_average_bandwidth(wm) <=
+	    (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce8_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
+{
+	if (dce8_average_bandwidth(wm) <=
+	    (dce8_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce8_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (dce8_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce8_program_watermarks - program display watermarks
+ *
+ * @rdev: radeon_device pointer
+ * @radeon_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce8_program_watermarks(struct radeon_device *rdev,
+				    struct radeon_crtc *radeon_crtc,
+				    u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct dce8_wm_params wm_low, wm_high;
+	u32 active_time;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 tmp, wm_mask;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
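+		/* mode->clock is in kHz, so pixels * 10^6 / clock yields ns */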
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
+
+		/* watermark for high clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+		    rdev->pm.dpm_enabled) {
+			wm_high.yclk =
+				radeon_dpm_get_mclk(rdev, false) * 10;
+			wm_high.sclk =
+				radeon_dpm_get_sclk(rdev, false) * 10;
+		} else {
+			wm_high.yclk = rdev->pm.current_mclk * 10;
+			wm_high.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_high.disp_clk = mode->clock;
+		wm_high.src_width = mode->crtc_hdisplay;
+		wm_high.active_time = active_time;
+		wm_high.blank_time = line_time - wm_high.active_time;
+		wm_high.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_high.interlaced = true;
+		wm_high.vsc = radeon_crtc->vsc;
+		wm_high.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_high.vtaps = 2;
+		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_high.lb_size = lb_size;
+		wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
+		wm_high.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+		    !dce8_check_latency_hiding(&wm_high) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+		}
+
+		/* watermark for low clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+		    rdev->pm.dpm_enabled) {
+			wm_low.yclk =
+				radeon_dpm_get_mclk(rdev, true) * 10;
+			wm_low.sclk =
+				radeon_dpm_get_sclk(rdev, true) * 10;
+		} else {
+			wm_low.yclk = rdev->pm.current_mclk * 10;
+			wm_low.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_low.disp_clk = mode->clock;
+		wm_low.src_width = mode->crtc_hdisplay;
+		wm_low.active_time = active_time;
+		wm_low.blank_time = line_time - wm_low.active_time;
+		wm_low.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_low.interlaced = true;
+		wm_low.vsc = radeon_crtc->vsc;
+		wm_low.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_low.vtaps = 2;
+		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_low.lb_size = lb_size;
+		wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
+		wm_low.num_heads = num_heads;
+
+		/* set for low clocks */
+		latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+		    !dce8_check_latency_hiding(&wm_low) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+		}
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
+	}
+
+	/* select wm A */
+	wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
+	tmp = wm_mask;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
+
+	/* save values for DPM */
+	radeon_crtc->line_time = line_time;
+	radeon_crtc->wm_high = latency_watermark_a;
+	radeon_crtc->wm_low = latency_watermark_b;
+}
+
+/**
+ * dce8_bandwidth_update - program display watermarks
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+void dce8_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i++) {
+		mode = &rdev->mode_info.crtcs[i]->base.mode;
+		lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
+		dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+	}
+}
+
+/**
+ * cik_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (CIK).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
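+	/* writing the capture register latches the free-running counter so
+	 * that the two 32-bit halves below read back a coherent snapshot */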
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
+
+static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+			     u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	uint32_t tmp;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					   clock, false, &dividers);
+	if (r)
+		return r;
+
+	tmp = RREG32_SMC(cntl_reg);
+	tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(cntl_reg, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(status_reg) & DCLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	int r = 0;
+
+	r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+	if (r)
+		return r;
+
+	r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+	return r;
+}
+
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	u32 tmp;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	tmp = RREG32_SMC(CG_ECLK_CNTL);
+	tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(CG_ECLK_CNTL, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void cik_pcie_gen3_enable(struct radeon_device *rdev)
+{
+	struct pci_dev *root = rdev->pdev->bus->self;
+	enum pci_bus_speed speed_cap;
+	int bridge_pos, gpu_pos;
+	u32 speed_cntl, current_data_rate;
+	int i;
+	u16 tmp16;
+
+	if (pci_is_root_bus(rdev->pdev->bus))
+		return;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	speed_cap = pcie_get_speed_cap(root);
+	if (speed_cap == PCI_SPEED_UNKNOWN)
+		return;
+
+	if ((speed_cap != PCIE_SPEED_8_0GT) &&
+	    (speed_cap != PCIE_SPEED_5_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+		LC_CURRENT_DATA_RATE_SHIFT;
+	if (speed_cap == PCIE_SPEED_8_0GT) {
+		if (current_data_rate == 2) {
+			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
+	} else if (speed_cap == PCIE_SPEED_5_0GT) {
+		if (current_data_rate == 1) {
+			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+	}
+
+	bridge_pos = pci_pcie_cap(root);
+	if (!bridge_pos)
+		return;
+
+	gpu_pos = pci_pcie_cap(rdev->pdev);
+	if (!gpu_pos)
+		return;
+
+	if (speed_cap == PCIE_SPEED_8_0GT) {
+		/* re-try equalization if gen3 is not already enabled */
+		if (current_data_rate != 2) {
+			u16 bridge_cfg, gpu_cfg;
+			u16 bridge_cfg2, gpu_cfg2;
+			u32 max_lw, current_lw, tmp;
+
+			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+			if (current_lw < max_lw) {
+				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+				if (tmp & LC_RENEGOTIATION_SUPPORT) {
+					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
+			for (i = 0; i < 10; i++) {
+				/* check status */
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_REDO_EQ;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				/* linkctl */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				/* linkctl2 */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp &= ~LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	/* set the link speed */
+	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
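+	/* LNKCTL2 bits 3:0 form the Target Link Speed field; clear it and
+	 * request the highest rate the bridge advertised support for */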
+	tmp16 &= ~0xf;
+	if (speed_cap == PCIE_SPEED_8_0GT)
+		tmp16 |= 3; /* gen3 */
+	else if (speed_cap == PCIE_SPEED_5_0GT)
+		tmp16 |= 2; /* gen2 */
+	else
+		tmp16 |= 1; /* gen1 */
+	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void cik_program_aspm(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (radeon_aspm == 0)
+		return;
+
+	/* XXX double check IGPs */
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+	data &= ~LC_XMIT_N_FTS_MASK;
+	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+	data |= LC_GO_TO_RECOVERY;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
+	data |= P_IGNORE_EDB_ERR;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	data |= LC_PMI_TO_L1_DIS;
+	if (!disable_l0s)
+		data |= LC_L0S_INACTIVITY(7);
+
+	if (!disable_l1) {
+		data |= LC_L1_INACTIVITY(7);
+		data &= ~LC_PMI_TO_L1_DIS;
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (orig != data)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			if (!disable_clkreq &&
+			    !pci_is_root_bus(rdev->pdev->bus)) {
+				struct pci_dev *root = rdev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+				orig = data = RREG32_SMC(THM_CLK_CNTL);
+				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+				if (orig != data)
+					WREG32_SMC(THM_CLK_CNTL, data);
+
+				orig = data = RREG32_SMC(MISC_CLK_CTRL);
+				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+				if (orig != data)
+					WREG32_SMC(MISC_CLK_CTRL, data);
+
+				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
+				data &= ~BCLK_AS_XCLK;
+				if (orig != data)
+					WREG32_SMC(CG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
+				data &= ~FORCE_BIF_REFCLK_EN;
+				if (orig != data)
+					WREG32_SMC(CG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_CLKOUT_SEL_MASK;
+				data |= MPLL_CLKOUT_SEL(4);
+				if (orig != data)
+					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+				data &= ~LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.c b/drivers/gpu/drm/radeon/cik_blit_shaders.c
new file mode 100644
index 0000000..ff13118
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+const u32 cik_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0046900,
+	0x00000008,
+	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0046900,
+	0x00000100,
+	0xffffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+	0xc0046900,
+	0x00000105,
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00c6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 cik_default_size = ARRAY_SIZE(cik_default_state);
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.h b/drivers/gpu/drm/radeon/cik_blit_shaders.h
new file mode 100644
index 0000000..dfe7314
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CIK_BLIT_SHADERS_H
+#define CIK_BLIT_SHADERS_H
+
+extern const u32 cik_default_state[];
+
+extern const u32 cik_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
new file mode 100644
index 0000000..318377d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __CIK_REG_H__
+#define __CIK_REG_H__
+
+#define CIK_DIDT_IND_INDEX                        0xca00
+#define CIK_DIDT_IND_DATA                         0xca04
+
+#define CIK_DC_GPIO_HPD_MASK                      0x65b0
+#define CIK_DC_GPIO_HPD_A                         0x65b4
+#define CIK_DC_GPIO_HPD_EN                        0x65b8
+#define CIK_DC_GPIO_HPD_Y                         0x65bc
+
+#define CIK_GRPH_CONTROL                          0x6804
+#       define CIK_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define CIK_GRPH_DEPTH_8BPP                0
+#       define CIK_GRPH_DEPTH_16BPP               1
+#       define CIK_GRPH_DEPTH_32BPP               2
+#       define CIK_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define CIK_ADDR_SURF_2_BANK               0
+#       define CIK_ADDR_SURF_4_BANK               1
+#       define CIK_ADDR_SURF_8_BANK               2
+#       define CIK_ADDR_SURF_16_BANK              3
+#       define CIK_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define CIK_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define CIK_ADDR_SURF_BANK_WIDTH_1         0
+#       define CIK_ADDR_SURF_BANK_WIDTH_2         1
+#       define CIK_ADDR_SURF_BANK_WIDTH_4         2
+#       define CIK_ADDR_SURF_BANK_WIDTH_8         3
+#       define CIK_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define CIK_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define CIK_GRPH_FORMAT_ARGB1555           0
+#       define CIK_GRPH_FORMAT_ARGB565            1
+#       define CIK_GRPH_FORMAT_ARGB4444           2
+#       define CIK_GRPH_FORMAT_AI88               3
+#       define CIK_GRPH_FORMAT_MONO16             4
+#       define CIK_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define CIK_GRPH_FORMAT_ARGB8888           0
+#       define CIK_GRPH_FORMAT_ARGB2101010        1
+#       define CIK_GRPH_FORMAT_32BPP_DIG          2
+#       define CIK_GRPH_FORMAT_8B_ARGB2101010     3
+#       define CIK_GRPH_FORMAT_BGRA1010102        4
+#       define CIK_GRPH_FORMAT_8B_BGRA1010102     5
+#       define CIK_GRPH_FORMAT_RGB111110          6
+#       define CIK_GRPH_FORMAT_BGR101111          7
+#       define CIK_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define CIK_ADDR_SURF_BANK_HEIGHT_1        0
+#       define CIK_ADDR_SURF_BANK_HEIGHT_2        1
+#       define CIK_ADDR_SURF_BANK_HEIGHT_4        2
+#       define CIK_ADDR_SURF_BANK_HEIGHT_8        3
+#       define CIK_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define CIK_ADDR_SURF_TILE_SPLIT_64B       0
+#       define CIK_ADDR_SURF_TILE_SPLIT_128B      1
+#       define CIK_ADDR_SURF_TILE_SPLIT_256B      2
+#       define CIK_ADDR_SURF_TILE_SPLIT_512B      3
+#       define CIK_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define CIK_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define CIK_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define CIK_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define CIK_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define CIK_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define CIK_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define CIK_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define CIK_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define CIK_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define CIK_ADDR_SURF_P2			 0
+#       define CIK_ADDR_SURF_P4_8x16		 4
+#       define CIK_ADDR_SURF_P4_16x16		 5
+#       define CIK_ADDR_SURF_P4_16x32		 6
+#       define CIK_ADDR_SURF_P4_32x32		 7
+#       define CIK_ADDR_SURF_P8_16x16_8x16	 8
+#       define CIK_ADDR_SURF_P8_16x32_8x16	 9
+#       define CIK_ADDR_SURF_P8_32x32_8x16	 10
+#       define CIK_ADDR_SURF_P8_16x32_16x16	 11
+#       define CIK_ADDR_SURF_P8_32x32_16x16	 12
+#       define CIK_ADDR_SURF_P8_32x32_16x32	 13
+#       define CIK_ADDR_SURF_P8_32x64_32x32	 14
+#       define CIK_GRPH_MICRO_TILE_MODE(x)       (((x) & 0x7) << 29)
+#       define CIK_DISPLAY_MICRO_TILING          0
+#       define CIK_THIN_MICRO_TILING             1
+#       define CIK_DEPTH_MICRO_TILING            2
+#       define CIK_ROTATED_MICRO_TILING          4
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define CIK_CUR_CONTROL                           0x6998
+#       define CIK_CURSOR_EN                      (1 << 0)
+#       define CIK_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define CIK_CURSOR_MONO                    0
+#       define CIK_CURSOR_24_1                    1
+#       define CIK_CURSOR_24_8_PRE_MULT           2
+#       define CIK_CURSOR_24_8_UNPRE_MULT         3
+#       define CIK_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define CIK_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define CIK_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define CIK_CURSOR_URGENT_ALWAYS           0
+#       define CIK_CURSOR_URGENT_1_8              1
+#       define CIK_CURSOR_URGENT_1_4              2
+#       define CIK_CURSOR_URGENT_3_8              3
+#       define CIK_CURSOR_URGENT_1_2              4
+#define CIK_CUR_SURFACE_ADDRESS                   0x699c
+#       define CIK_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define CIK_CUR_SIZE                              0x69a0
+#define CIK_CUR_SURFACE_ADDRESS_HIGH              0x69a4
+#define CIK_CUR_POSITION                          0x69a8
+#define CIK_CUR_HOT_SPOT                          0x69ac
+#define CIK_CUR_COLOR1                            0x69b0
+#define CIK_CUR_COLOR2                            0x69b4
+#define CIK_CUR_UPDATE                            0x69b8
+#       define CIK_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define CIK_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define CIK_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define CIK_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define CIK_ALPHA_CONTROL                         0x6af0
+#       define CIK_CURSOR_ALPHA_BLND_ENA          (1 << 1)
+
+#define CIK_LB_DATA_FORMAT                        0x6b00
+#       define CIK_INTERLEAVE_EN                  (1 << 3)
+
+#define CIK_LB_DESKTOP_HEIGHT                     0x6b0c
+
+#define SQ_IND_INDEX					0x8DE0
+#define SQ_CMD						0x8DEC
+#define SQ_IND_DATA					0x8DE4
+
+/*
+ * The TCP_WATCHx_xxxx addresses below are dword offsets;
+ * they are multiplied by 4 to convert them to byte offsets.
+ */
+#define TCP_WATCH0_ADDR_H				(0x32A0*4)
+#define TCP_WATCH1_ADDR_H				(0x32A3*4)
+#define TCP_WATCH2_ADDR_H				(0x32A6*4)
+#define TCP_WATCH3_ADDR_H				(0x32A9*4)
+#define TCP_WATCH0_ADDR_L				(0x32A1*4)
+#define TCP_WATCH1_ADDR_L				(0x32A4*4)
+#define TCP_WATCH2_ADDR_L				(0x32A7*4)
+#define TCP_WATCH3_ADDR_L				(0x32AA*4)
+#define TCP_WATCH0_CNTL					(0x32A2*4)
+#define TCP_WATCH1_CNTL					(0x32A5*4)
+#define TCP_WATCH2_CNTL					(0x32A8*4)
+#define TCP_WATCH3_CNTL					(0x32AB*4)
+
+#define CPC_INT_CNTL					0xC2D0
+
+#define CP_HQD_IQ_RPTR					0xC970u
+#define SDMA0_RLC0_RB_CNTL				0xD400u
+#define	SDMA_RB_VMID(x)					(x << 24)
+#define	SDMA0_RLC0_RB_BASE				0xD404u
+#define	SDMA0_RLC0_RB_BASE_HI				0xD408u
+#define	SDMA0_RLC0_RB_RPTR				0xD40Cu
+#define	SDMA0_RLC0_RB_WPTR				0xD410u
+#define	SDMA0_RLC0_RB_WPTR_POLL_CNTL			0xD414u
+#define	SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI			0xD418u
+#define	SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO			0xD41Cu
+#define	SDMA0_RLC0_RB_RPTR_ADDR_HI			0xD420u
+#define	SDMA0_RLC0_RB_RPTR_ADDR_LO			0xD424u
+#define	SDMA0_RLC0_IB_CNTL				0xD428u
+#define	SDMA0_RLC0_IB_RPTR				0xD42Cu
+#define	SDMA0_RLC0_IB_OFFSET				0xD430u
+#define	SDMA0_RLC0_IB_BASE_LO				0xD434u
+#define	SDMA0_RLC0_IB_BASE_HI				0xD438u
+#define	SDMA0_RLC0_IB_SIZE				0xD43Cu
+#define	SDMA0_RLC0_SKIP_CNTL				0xD440u
+#define	SDMA0_RLC0_CONTEXT_STATUS			0xD444u
+#define	SDMA_RLC_IDLE					(1 << 2)
+#define	SDMA0_RLC0_DOORBELL				0xD448u
+#define	SDMA_OFFSET(x)					(x << 0)
+#define	SDMA_DB_ENABLE					(1 << 28)
+#define	SDMA0_RLC0_VIRTUAL_ADDR				0xD49Cu
+#define	SDMA_ATC					(1 << 0)
+#define	SDMA_VA_PTR32					(1 << 4)
+#define	SDMA_VA_SHARED_BASE(x)				(x << 8)
+#define	SDMA0_RLC0_APE1_CNTL				0xD4A0u
+#define	SDMA0_RLC0_DOORBELL_LOG				0xD4A4u
+#define	SDMA0_RLC0_WATERMARK				0xD4A8u
+#define	SDMA0_CNTL					0xD010
+#define	SDMA1_CNTL					0xD810
+
+enum {
+	MAX_TRAPID = 8,		/* 3 bits in the bitfield.  */
+	MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+	ADDRESS_WATCH_REG_ADDR_HI = 0,
+	ADDRESS_WATCH_REG_ADDR_LO,
+	ADDRESS_WATCH_REG_CNTL,
+	ADDRESS_WATCH_REG_MAX
+};
+
+enum {				/*  not defined in the CI/KV reg file  */
+	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+	/* extend the mask to 26 bits in order to match the low address field */
+	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+union TCP_WATCH_CNTL_BITS {
+	struct {
+		uint32_t mask:24;
+		uint32_t vmid:4;
+		uint32_t atc:1;
+		uint32_t mode:2;
+		uint32_t valid:1;
+	} bitfields, bits;
+	uint32_t u32All;
+	signed int i32All;
+	float f32All;
+};
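+
+/*
+ * Illustrative sketch (not from the register spec): the union lets a
+ * 32-bit watch-control word be built or decoded field by field, e.g.:
+ *
+ *	union TCP_WATCH_CNTL_BITS cntl;
+ *
+ *	cntl.u32All = 0;
+ *	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+ *	cntl.bitfields.valid = 1;
+ *	(cntl.u32All now holds the packed control word)
+ */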
+
+#endif
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 0000000..9c351dc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,1001 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+#include "radeon_asic.h"
+#include "radeon_trace.h"
+#include "cikd.h"
+
+/* sdma */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
+
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines.  These engines are used for compute
+ * and gfx.  There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things.  It also has support for tiling/detiling of
+ * buffers.
+ */
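+
+/*
+ * A minimal sketch of the packet format, as used by the helpers below
+ * (see cik_sdma_ring_test()): each packet starts with a header built by
+ * SDMA_PACKET(op, sub_op, extra_bits), followed by packet-specific DWs.
+ * For example, a linear write of a single DW:
+ *
+ *	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE,
+ *					    SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ *	radeon_ring_write(ring, lower_32_bits(gpu_addr));
+ *	radeon_ring_write(ring, upper_32_bits(gpu_addr));
+ *	radeon_ring_write(ring, 1);		(number of DWs to follow)
+ *	radeon_ring_write(ring, 0xDEADBEEF);	(the payload)
+ */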
+
+/**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 rptr, reg;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+		else
+			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+		rptr = RREG32(reg);
+	}
+
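+	/* the hardware keeps the ring pointer as a byte offset; convert to DWs */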
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+	(void)RREG32(reg);
+}
+
+/**
+ * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
+			      struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 5;
+		while ((next_rptr & 7) != 4)
+			next_rptr++;
+		next_rptr += 4;
+		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+		radeon_ring_write(ring, 1); /* number of DWs to follow */
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* IB packet must end on an 8 DW boundary */
+	while ((ring->wptr & 7) != 4)
+		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+					 int ridx)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+	u32 ref_and_mask;
+
+	if (ridx == R600_RING_TYPE_DMA_INDEX)
+		ref_and_mask = SDMA0;
+	else
+		ref_and_mask = SDMA1;
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+	radeon_ring_write(ring, ref_and_mask); /* reference */
+	radeon_ring_write(ring, ref_and_mask); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
+ * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, upper_32_bits(addr));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+	/* flush HDP */
+	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
+}
+
+/**
+ * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or
+ * signal other rings (CIK).
+ */
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+	radeon_ring_write(ring, addr & 0xfffffff8);
+	radeon_ring_write(ring, upper_32_bits(addr));
+
+	return true;
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl, reg_offset;
+	int i;
+
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			reg_offset = SDMA0_REGISTER_OFFSET;
+		else
+			reg_offset = SDMA1_REGISTER_OFFSET;
+		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
+		rb_cntl &= ~SDMA_RB_ENABLE;
+		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+	}
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+	/* FIXME use something other than this big hammer; after a few days of
+	 * trying, no better combination was found, so reset the SDMA blocks as
+	 * it seems we do not shut them down properly.  This fixes hibernation
+	 * and does not affect suspend to RAM.
+	 */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+	(void)RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	(void)RREG32(SRBM_SOFT_RESET);
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct radeon_device *rdev)
+{
+	/* XXX todo */
+}
+
+/**
+ * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable preemption.
+ *
+ * Enable or disable automatic context switching for the async dma engines (CIK).
+ */
+static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
+{
+	uint32_t reg_offset, value;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			reg_offset = SDMA0_REGISTER_OFFSET;
+		else
+			reg_offset = SDMA1_REGISTER_OFFSET;
+		value = RREG32(SDMA0_CNTL + reg_offset);
+		if (enable)
+			value |= AUTO_CTXSW_ENABLE;
+		else
+			value &= ~AUTO_CTXSW_ENABLE;
+		WREG32(SDMA0_CNTL + reg_offset, value);
+	}
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+{
+	u32 me_cntl, reg_offset;
+	int i;
+
+	if (!enable) {
+		cik_sdma_gfx_stop(rdev);
+		cik_sdma_rlc_stop(rdev);
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			reg_offset = SDMA0_REGISTER_OFFSET;
+		else
+			reg_offset = SDMA1_REGISTER_OFFSET;
+		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
+		if (enable)
+			me_cntl &= ~SDMA_HALT;
+		else
+			me_cntl |= SDMA_HALT;
+		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
+	}
+
+	cik_sdma_ctx_switch_enable(rdev, enable);
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 rb_cntl, ib_cntl;
+	u32 rb_bufsz;
+	u32 reg_offset, wb_offset;
+	int i, r;
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0) {
+			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+			reg_offset = SDMA0_REGISTER_OFFSET;
+			wb_offset = R600_WB_DMA_RPTR_OFFSET;
+		} else {
+			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+			reg_offset = SDMA1_REGISTER_OFFSET;
+			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+		}
+
+		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
+		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
+		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
+		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		if (rdev->wb.enabled)
+			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
+
+		ring->wptr = 0;
+		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
+
+		/* enable DMA RB */
+		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
+
+		ib_cntl = SDMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= SDMA_IB_SWAP_ENABLE;
+#endif
+		/* enable DMA IBs */
+		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
+
+		ring->ready = true;
+
+		r = radeon_ring_test(rdev, ring->idx, ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+	}
+
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct radeon_device *rdev)
+{
+	/* XXX todo */
+	return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->sdma_fw)
+		return -EINVAL;
+
+	/* halt the MEs */
+	cik_sdma_enable(rdev, false);
+
+	if (rdev->new_fw) {
+		const struct sdma_firmware_header_v1_0 *hdr =
+			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+		const __le32 *fw_data;
+		u32 fw_size;
+
+		radeon_ucode_print_sdma_hdr(&hdr->header);
+
+		/* sdma0 */
+		fw_data = (const __le32 *)
+			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+		/* sdma1 */
+		fw_data = (const __le32 *)
+			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+	} else {
+		const __be32 *fw_data;
+
+		/* sdma0 */
+		fw_data = (const __be32 *)rdev->sdma_fw->data;
+		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+		/* sdma1 */
+		fw_data = (const __be32 *)rdev->sdma_fw->data;
+		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+	}
+
+	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+	return 0;
+}
+
+/**
+ * cik_sdma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	r = cik_sdma_load_microcode(rdev);
+	if (r)
+		return r;
+
+	/* unhalt the MEs */
+	cik_sdma_enable(rdev, true);
+
+	/* start the gfx rings and rlc compute queues */
+	r = cik_sdma_gfx_resume(rdev);
+	if (r)
+		return r;
+	r = cik_sdma_rlc_resume(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * cik_sdma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (CIK).
+ */
+void cik_sdma_fini(struct radeon_device *rdev)
+{
+	/* halt the MEs */
+	cik_sdma_enable(rdev, false);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+	/* XXX - compute dma queue tear down */
+}
+
+/**
+ * cik_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the DMA engine (CIK).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+				  uint64_t src_offset, uint64_t dst_offset,
+				  unsigned num_gpu_pages,
+				  struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
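+	/* a single sDMA copy packet can move at most 0x1fffff bytes */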
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0x1fffff)
+			cur_size_in_bytes = 0x1fffff;
+		size_in_bytes -= cur_size_in_bytes;
+		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+		radeon_ring_write(ring, cur_size_in_bytes);
+		radeon_ring_write(ring, 0); /* src/dst endian swap */
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
+
+/**
+ * cik_sdma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	unsigned index;
+	u32 tmp;
+	u64 gpu_addr;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
+
+	tmp = 0xCAFEDEAD;
+	rdev->wb.wb[index/4] = cpu_to_le32(tmp);
+
+	r = radeon_ring_lock(rdev, ring, 5);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+	radeon_ring_write(ring, lower_32_bits(gpu_addr));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr));
+	radeon_ring_write(ring, 1); /* number of DWs to follow */
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/**
+ * cik_sdma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp = 0;
+	u64 gpu_addr;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
+
+	tmp = 0xCAFEDEAD;
+	rdev->wb.wb[index/4] = cpu_to_le32(tmp);
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr);
+	ib.ptr[3] = 1;
+	ib.ptr[4] = 0xDEADBEEF;
+	ib.length_dw = 5;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		return -ETIMEDOUT;
+	}
+	r = 0;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/**
+ * cik_sdma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
+	u32 mask;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		mask = RADEON_RESET_DMA;
+	else
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
+		if (bytes > 0x1FFFF8)
+			bytes = 0x1FFFF8;
+
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+			SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = bytes;
+		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(src);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
+
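+/*
+ * Editor's note: each COPY packet above moves at most 0x1FFFF8 bytes,
+ * i.e. 0x3FFFF eight-byte PTEs, at a fixed cost of 7 dwords.  A hedged
+ * bound on the IB space needed for @count entries, using only the
+ * constants above:
+ *
+ *	ndw = DIV_ROUND_UP(count, 0x3FFFF) * 7;
+ */
+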
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = ndw;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
+
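+/*
+ * Editor's note: the WRITE packet above is a 4-dword header plus two
+ * dwords per PTE, capped at 0xFFFFE data dwords (0x7FFFF entries) per
+ * packet, so a hedged worst-case IB estimate for @count entries is:
+ *
+ *	ndw = count * 2 + DIV_ROUND_UP(count, 0x7FFFF) * 4;
+ */
+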
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count;
+		if (ndw > 0x7FFFF)
+			ndw = 0x7FFFF;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+		pe += ndw * 8;
+		addr += ndw * incr;
+		count -= ndw;
+	}
+}
+
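+/*
+ * Editor's note: GENERATE_PTE_PDE covers up to 0x7FFFF entries with one
+ * constant 10-dword packet, so for large contiguous VRAM ranges it is far
+ * cheaper than the manual path: one million PTEs take about 20 dwords
+ * here versus roughly 2000008 dwords via cik_sdma_vm_write_pages().
+ */
+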
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs to a multiple of 8 dw (CIK).
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
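+/*
+ * Editor's sketch: the SDMA engine consumes submissions in 8-dword
+ * multiples, so an IB of length 5 gains three NOP packets:
+ *
+ *	ib.length_dw = 5;
+ *	cik_sdma_vm_pad_ib(&ib);
+ *
+ * leaving ib.length_dw == 8.
+ */
+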
+/**
+ * cik_dma_vm_flush - cik vm flush using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @vm_id: vm to flush
+ * @pd_addr: address of the page directory
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		      unsigned vm_id, uint64_t pd_addr)
+{
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	if (vm_id < 8) {
+		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* update SH_MEM_* regs */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+	radeon_ring_write(ring, VMID(vm_id));
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SH_MEM_BASES >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
+	radeon_ring_write(ring, 1);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+	radeon_ring_write(ring, VMID(0));
+
+	/* flush HDP */
+	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
+
+	/* flush TLB */
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 1 << vm_id);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* reference */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
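+
+/*
+ * Editor's note: every SRBM_WRITE above passes 0xf000 as the "extra"
+ * argument; SDMA_PACKET() shifts it into header bits 16..31, so the 0xf
+ * lands in bits 28..31, which the SDMA packet format uses as the
+ * byte-enable (write all four bytes of the target register).
+ */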
+
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
new file mode 100644
index 0000000..cda16fc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -0,0 +1,2172 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef CIK_H
+#define CIK_H
+
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
+
+#define CIK_RB_BITMAP_WIDTH_PER_SH     2
+#define HAWAII_RB_BITMAP_WIDTH_PER_SH  4
+
+/* DIDT IND registers */
+#define DIDT_SQ_CTRL0                                     0x0
+#       define DIDT_CTRL_EN                               (1 << 0)
+#define DIDT_DB_CTRL0                                     0x20
+#define DIDT_TD_CTRL0                                     0x40
+#define DIDT_TCP_CTRL0                                    0x60
+
+/* SMC IND registers */
+#define DPM_TABLE_475                                     0x3F768
+#       define SamuBootLevel(x)                           ((x) << 0)
+#       define SamuBootLevel_MASK                         0x000000ff
+#       define SamuBootLevel_SHIFT                        0
+#       define AcpBootLevel(x)                            ((x) << 8)
+#       define AcpBootLevel_MASK                          0x0000ff00
+#       define AcpBootLevel_SHIFT                         8
+#       define VceBootLevel(x)                            ((x) << 16)
+#       define VceBootLevel_MASK                          0x00ff0000
+#       define VceBootLevel_SHIFT                         16
+#       define UvdBootLevel(x)                            ((x) << 24)
+#       define UvdBootLevel_MASK                          0xff000000
+#       define UvdBootLevel_SHIFT                         24
+
+#define FIRMWARE_FLAGS                                    0x3F800
+#       define INTERRUPTS_ENABLED                         (1 << 0)
+
+#define NB_DPM_CONFIG_1                                   0x3F9E8
+#       define Dpm0PgNbPsLo(x)                            ((x) << 0)
+#       define Dpm0PgNbPsLo_MASK                          0x000000ff
+#       define Dpm0PgNbPsLo_SHIFT                         0
+#       define Dpm0PgNbPsHi(x)                            ((x) << 8)
+#       define Dpm0PgNbPsHi_MASK                          0x0000ff00
+#       define Dpm0PgNbPsHi_SHIFT                         8
+#       define DpmXNbPsLo(x)                              ((x) << 16)
+#       define DpmXNbPsLo_MASK                            0x00ff0000
+#       define DpmXNbPsLo_SHIFT                           16
+#       define DpmXNbPsHi(x)                              ((x) << 24)
+#       define DpmXNbPsHi_MASK                            0xff000000
+#       define DpmXNbPsHi_SHIFT                           24
+
+#define	SMC_SYSCON_RESET_CNTL				0x80000000
+#       define RST_REG                                  (1 << 0)
+#define	SMC_SYSCON_CLOCK_CNTL_0				0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define	SMC_SYSCON_MISC_CNTL				0x80000010
+
+#define SMC_SYSCON_MSG_ARG_0                              0x80000068
+
+#define SMC_PC_C                                          0x80000370
+
+#define SMC_SCRATCH9                                      0x80000424
+
+#define RCU_UC_EVENTS                                     0xC0000004
+#       define BOOT_SEQ_DONE                              (1 << 7)
+
+#define GENERAL_PWRMGT                                    0xC0200000
+#       define GLOBAL_PWRMGT_EN                           (1 << 0)
+#       define STATIC_PM_EN                               (1 << 1)
+#       define THERMAL_PROTECTION_DIS                     (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                    (1 << 3)
+#       define SW_SMIO_INDEX(x)                           ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                         (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                        6
+#       define VOLT_PWRMGT_EN                             (1 << 10)
+#       define GPU_COUNTER_CLK                            (1 << 15)
+#       define DYN_SPREAD_SPECTRUM_EN                     (1 << 23)
+
+#define CNB_PWRMGT_CNTL                                   0xC0200004
+#       define GNB_SLOW_MODE(x)                           ((x) << 0)
+#       define GNB_SLOW_MODE_MASK                         (3 << 0)
+#       define GNB_SLOW_MODE_SHIFT                        0
+#       define GNB_SLOW                                   (1 << 2)
+#       define FORCE_NB_PS1                               (1 << 3)
+#       define DPM_ENABLED                                (1 << 4)
+
+#define SCLK_PWRMGT_CNTL                                  0xC0200008
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define RESET_BUSY_CNT                             (1 << 4)
+#       define RESET_SCLK_CNT                             (1 << 5)
+#       define DYNAMIC_PM_EN                              (1 << 21)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0xC0200014
+#       define CURRENT_STATE_MASK                         (0xf << 4)
+#       define CURRENT_STATE_SHIFT                        4
+#       define CURR_MCLK_INDEX_MASK                       (0xf << 8)
+#       define CURR_MCLK_INDEX_SHIFT                      8
+#       define CURR_SCLK_INDEX_MASK                       (0x1f << 16)
+#       define CURR_SCLK_INDEX_SHIFT                      16
+
+#define CG_SSP                                            0xC0200044
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0xC0200060
+#       define DISP_GAP(x)                                ((x) << 0)
+#       define DISP_GAP_MASK                              (3 << 0)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP_GAP_MCHG(x)                           ((x) << 24)
+#       define DISP_GAP_MCHG_MASK                         (3 << 24)
+
+#define SMU_VOLTAGE_STATUS                                0xC0200094
+#       define SMU_VOLTAGE_CURRENT_LEVEL_MASK             (0xff << 1)
+#       define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT            1
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1                0xC02000F0
+#       define CURR_PCIE_INDEX_MASK                       (0xf << 24)
+#       define CURR_PCIE_INDEX_SHIFT                      24
+
+#define CG_ULV_PARAMETER                                  0xC0200158
+
+#define CG_FTV_0                                          0xC02001A8
+#define CG_FTV_1                                          0xC02001AC
+#define CG_FTV_2                                          0xC02001B0
+#define CG_FTV_3                                          0xC02001B4
+#define CG_FTV_4                                          0xC02001B8
+#define CG_FTV_5                                          0xC02001BC
+#define CG_FTV_6                                          0xC02001C0
+#define CG_FTV_7                                          0xC02001C4
+
+#define CG_DISPLAY_GAP_CNTL2                              0xC0200230
+
+#define LCAC_SX0_OVR_SEL                                  0xC0400D04
+#define LCAC_SX0_OVR_VAL                                  0xC0400D08
+
+#define LCAC_MC0_CNTL                                     0xC0400D30
+#define LCAC_MC0_OVR_SEL                                  0xC0400D34
+#define LCAC_MC0_OVR_VAL                                  0xC0400D38
+#define LCAC_MC1_CNTL                                     0xC0400D3C
+#define LCAC_MC1_OVR_SEL                                  0xC0400D40
+#define LCAC_MC1_OVR_VAL                                  0xC0400D44
+
+#define LCAC_MC2_OVR_SEL                                  0xC0400D4C
+#define LCAC_MC2_OVR_VAL                                  0xC0400D50
+
+#define LCAC_MC3_OVR_SEL                                  0xC0400D58
+#define LCAC_MC3_OVR_VAL                                  0xC0400D5C
+
+#define LCAC_CPL_CNTL                                     0xC0400D80
+#define LCAC_CPL_OVR_SEL                                  0xC0400D84
+#define LCAC_CPL_OVR_VAL                                  0xC0400D88
+
+/* dGPU */
+#define	CG_THERMAL_CTRL					0xC0300004
+#define 	DPM_EVENT_SRC(x)			((x) << 0)
+#define 	DPM_EVENT_SRC_MASK			(7 << 0)
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+#define	CG_THERMAL_STATUS				0xC0300008
+#define		FDO_PWM_DUTY(x)				((x) << 9)
+#define		FDO_PWM_DUTY_MASK			(0xff << 9)
+#define		FDO_PWM_DUTY_SHIFT			9
+#define	CG_THERMAL_INT					0xC030000C
+#define		CI_DIG_THERM_INTH(x)			((x) << 8)
+#define		CI_DIG_THERM_INTH_MASK			0x0000FF00
+#define		CI_DIG_THERM_INTH_SHIFT			8
+#define		CI_DIG_THERM_INTL(x)			((x) << 16)
+#define		CI_DIG_THERM_INTL_MASK			0x00FF0000
+#define		CI_DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
+#define	CG_MULT_THERMAL_CTRL				0xC0300010
+#define		TEMP_SEL(x)				((x) << 20)
+#define		TEMP_SEL_MASK				(0xff << 20)
+#define		TEMP_SEL_SHIFT				20
+#define	CG_MULT_THERMAL_STATUS				0xC0300014
+#define		ASIC_MAX_TEMP(x)			((x) << 0)
+#define		ASIC_MAX_TEMP_MASK			0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT			0
+#define		CTF_TEMP(x)				((x) << 9)
+#define		CTF_TEMP_MASK				0x0003fe00
+#define		CTF_TEMP_SHIFT				9
+
+#define	CG_FDO_CTRL0					0xC0300064
+#define		FDO_STATIC_DUTY(x)			((x) << 0)
+#define		FDO_STATIC_DUTY_MASK			0x000000FF
+#define		FDO_STATIC_DUTY_SHIFT			0
+#define	CG_FDO_CTRL1					0xC0300068
+#define		FMAX_DUTY100(x)				((x) << 0)
+#define		FMAX_DUTY100_MASK			0x000000FF
+#define		FMAX_DUTY100_SHIFT			0
+#define	CG_FDO_CTRL2					0xC030006C
+#define		TMIN(x)					((x) << 0)
+#define		TMIN_MASK				0x000000FF
+#define		TMIN_SHIFT				0
+#define		FDO_PWM_MODE(x)				((x) << 11)
+#define		FDO_PWM_MODE_MASK			(7 << 11)
+#define		FDO_PWM_MODE_SHIFT			11
+#define		TACH_PWM_RESP_RATE(x)			((x) << 25)
+#define		TACH_PWM_RESP_RATE_MASK			(0x7f << 25)
+#define		TACH_PWM_RESP_RATE_SHIFT		25
+#define CG_TACH_CTRL                                    0xC0300070
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0xC0300074
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define CG_ECLK_CNTL                                    0xC05000AC
+#       define ECLK_DIVIDER_MASK                        0x7f
+#       define ECLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_ECLK_STATUS                                  0xC05000B0
+#       define ECLK_STATUS                              (1 << 0)
+
+#define	CG_SPLL_FUNC_CNTL				0xC0500140
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_PWRON				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 5)
+#define		SPLL_REF_DIV_MASK			(0x3f << 5)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define		SPLL_PDIV_A_SHIFT			20
+#define	CG_SPLL_FUNC_CNTL_2				0xC0500144
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define	CG_SPLL_FUNC_CNTL_3				0xC0500148
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_FB_DIV_SHIFT			0
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_FUNC_CNTL_4				0xC050014C
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0xC0500164
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define		CLK_S_SHIFT				4
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0xC0500168
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+#define		CLK_V_SHIFT				0
+
+#define	MPLL_BYPASSCLK_SEL				0xC050019C
+#	define MPLL_CLKOUT_SEL(x)			((x) << 8)
+#	define MPLL_CLKOUT_SEL_MASK			0xFF00
+#define CG_CLKPIN_CNTL                                    0xC05001A0
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0xC05001A4
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+#define	THM_CLK_CNTL					0xC05001A8
+#	define CMON_CLK_SEL(x)				((x) << 0)
+#	define CMON_CLK_SEL_MASK			0xFF
+#	define TMON_CLK_SEL(x)				((x) << 8)
+#	define TMON_CLK_SEL_MASK			0xFF00
+#define	MISC_CLK_CTRL					0xC05001AC
+#	define DEEP_SLEEP_CLK_SEL(x)			((x) << 0)
+#	define DEEP_SLEEP_CLK_SEL_MASK			0xFF
+#	define ZCLK_SEL(x)				((x) << 8)
+#	define ZCLK_SEL_MASK				0xFF00
+
+/* KV/KB */
+#define	CG_THERMAL_INT_CTRL				0xC2100028
+#define		DIG_THERM_INTH(x)			((x) << 0)
+#define		DIG_THERM_INTH_MASK			0x000000FF
+#define		DIG_THERM_INTH_SHIFT			0
+#define		DIG_THERM_INTL(x)			((x) << 8)
+#define		DIG_THERM_INTL_MASK			0x0000FF00
+#define		DIG_THERM_INTL_SHIFT			8
+#define 	THERM_INTH_MASK				(1 << 24)
+#define 	THERM_INTL_MASK				(1 << 25)
+
+/* PCIE registers idx/data 0x38/0x3c */
+#define PB0_PIF_PWRDOWN_0                                 0x1100012 /* PCIE */
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x1100013 /* PCIE */
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PCIE_CNTL2                                        0x1001001c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+
+#define PCIE_LC_STATUS1                                   0x1400028 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x1400040 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
+#define PB1_PIF_PWRDOWN_0                                 0x2100012 /* PCIE */
+#define PB1_PIF_PWRDOWN_1                                 0x2100013 /* PCIE */
+
+#define PCIE_LC_CNTL                                      0x100100A0 /* PCIE */
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+
+#define PCIE_LC_LINK_WIDTH_CNTL                           0x100100A2 /* PCIE */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_N_FTS_CNTL                                0x100100a3 /* PCIE */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0x100100A4 /* PCIE */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
+
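+/*
+ * Editor's note: a hedged read of the negotiated link speed from the
+ * fields above; CIK reaches these PCIE registers through the 0x38/0x3c
+ * index/data pair, assumed here to be wrapped by RREG32_PCIE_PORT():
+ *
+ *	u32 v = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ *	u32 gen = ((v & LC_CURRENT_DATA_RATE_MASK) >>
+ *		   LC_CURRENT_DATA_RATE_SHIFT) + 1;
+ *
+ * giving 1, 2 or 3 for gen1/2/3.
+ */
+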
+#define PCIE_LC_CNTL2                                     0x100100B1 /* PCIE */
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0x100100B5 /* PCIE */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0x100100B6 /* PCIE */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/* direct registers */
+#define PCIE_INDEX  					0x38
+#define PCIE_DATA  					0x3C
+
+#define SMC_IND_INDEX_0  				0x200
+#define SMC_IND_DATA_0  				0x204
+
+#define SMC_IND_ACCESS_CNTL  				0x240
+#define		AUTO_INCREMENT_IND_0			(1 << 0)
+
+#define SMC_MESSAGE_0  					0x250
+#define		SMC_MSG_MASK				0xffff
+#define SMC_RESP_0  					0x254
+#define		SMC_RESP_MASK				0xffff
+
+#define SMC_MSG_ARG_0  					0x290
+
+#define VGA_HDP_CONTROL  				0x328
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define	SRBM_GFX_CNTL				        0xE44
+#define		PIPEID(x)					((x) << 0)
+#define		MEID(x)						((x) << 2)
+#define		VMID(x)						((x) << 4)
+#define		QUEUEID(x)					((x) << 8)
+
+#define	SRBM_STATUS2				        0xE4C
+#define		SDMA_BUSY 				(1 << 5)
+#define		SDMA1_BUSY 				(1 << 6)
+#define	SRBM_STATUS				        0xE50
+#define		UVD_RQ_PENDING 				(1 << 1)
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		IH_BUSY 				(1 << 17)
+#define		UVD_BUSY 				(1 << 19)
+
+#define	SRBM_SOFT_RESET				        0xE60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_R0PLL			(1 << 4)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_SDMA1			(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_SDMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB		       	(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+#define		SOFT_RESET_VCE				(1 << 24)
+
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)			(((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x1438
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x143c
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x1440
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x1444
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x1448
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x144c
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
+
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+#define		PROTECTIONS_MASK			(0xf << 0)
+#define		PROTECTIONS_SHIFT			0
+		/* bit 0: range
+		 * bit 1: pde0
+		 * bit 2: valid
+		 * bit 3: read
+		 * bit 4: write
+		 */
+#define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		HAWAII_MEMORY_CLIENT_ID_MASK		(0x1ff << 12)
+#define		MEMORY_CLIENT_ID_SHIFT			12
+#define		MEMORY_CLIENT_RW_MASK			(1 << 24)
+#define		MEMORY_CLIENT_RW_SHIFT			24
+#define		FAULT_VMID_MASK				(0xf << 25)
+#define		FAULT_VMID_SHIFT			25
+
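+/*
+ * Editor's note: a hedged decode of a raw fault status word using only
+ * the masks above (variable names are illustrative):
+ *
+ *	u32 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ *	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ *	u32 client = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ *	bool write_fault = status & MEMORY_CLIENT_RW_MASK;
+ */
+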
+#define	VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT		0x14E4
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153c
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x1540
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x1544
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x1548
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x154c
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x1550
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x1554
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x1558
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155c
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x1560
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x1580
+
+#define VM_L2_CG           				0x15c0
+#define		MC_CG_ENABLE				(1 << 18)
+#define		MC_LS_ENABLE				(1 << 19)
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define CHUB_CONTROL					0x1864
+#define		BYPASS_VM					(1 << 0)
+
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+#define	MC_VM_FB_OFFSET					0x2068
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+
+#define MC_HUB_MISC_HUB_CG           			0x20b8
+#define MC_HUB_MISC_VM_CG           			0x20bc
+
+#define MC_HUB_MISC_SIP_CG           			0x20c0
+
+#define MC_XPB_CLK_GAT           			0x2478
+
+#define MC_CITF_MISC_RD_CG           			0x2648
+#define MC_CITF_MISC_WR_CG           			0x264c
+#define MC_CITF_MISC_VM_CG           			0x2650
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_ARB_DRAM_TIMING				0x2774
+#define	MC_ARB_DRAM_TIMING2				0x2778
+
+#define MC_ARB_BURST_TIME                               0x2808
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define MC_SEQ_RAS_TIMING                               0x28a0
+#define MC_SEQ_CAS_TIMING                               0x28a4
+#define MC_SEQ_MISC_TIMING                              0x28a8
+#define MC_SEQ_MISC_TIMING2                             0x28ac
+#define MC_SEQ_PMG_TIMING                               0x28b0
+#define MC_SEQ_RD_CTL_D0                                0x28b4
+#define MC_SEQ_RD_CTL_D1                                0x28b8
+#define MC_SEQ_WR_CTL_D0                                0x28bc
+#define MC_SEQ_WR_CTL_D1                                0x28c0
+
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_PMG_AUTO_CMD           			0x28d0
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x28e8
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1 << 31)
+
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_MISC0           				0x2a00
+#define 	MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define 	MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define 	MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define 	MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define 	MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define 	MC_SEQ_MISC0_REV_ID_VALUE               1
+#define 	MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define 	MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define 	MC_SEQ_MISC0_GDDR5_VALUE                5
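+
+/*
+ * Editor's note: a hedged GDDR5 probe built from the fields above, in the
+ * style the DPM code uses elsewhere:
+ *
+ *	u32 tmp = RREG32(MC_SEQ_MISC0);
+ *	bool gddr5 = ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >>
+ *		      MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
+ */
+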
+#define MC_SEQ_MISC1                                    0x2a04
+#define MC_SEQ_RESERVE_M                                0x2a08
+#define MC_PMG_CMD_EMRS                                 0x2a0c
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define MC_SEQ_MISC5                                    0x2a54
+#define MC_SEQ_MISC6                                    0x2a58
+
+#define MC_SEQ_MISC7                                    0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP                            0x2a6c
+#define MC_SEQ_CAS_TIMING_LP                            0x2a70
+#define MC_SEQ_MISC_TIMING_LP                           0x2a74
+#define MC_SEQ_MISC_TIMING2_LP                          0x2a78
+#define MC_SEQ_WR_CTL_D0_LP                             0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP                             0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP                           0x2a88
+
+#define MC_PMG_CMD_MRS                                  0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP                             0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP                             0x2b20
+
+#define MC_PMG_CMD_MRS1                                 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0x2b48
+#define MC_SEQ_PMG_TIMING_LP                            0x2b4c
+
+#define MC_SEQ_WR_CTL_2                                 0x2b54
+#define MC_SEQ_WR_CTL_2_LP                              0x2b58
+#define MC_PMG_CMD_MRS2                                 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0x2b60
+
+#define	MCLK_PWRMGT_CNTL				0x2ba0
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define	DLL_CNTL					0x2ba4
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define	MPLL_FUNC_CNTL					0x2bb4
+#define		BWCTRL(x)				((x) << 20)
+#define		BWCTRL_MASK				(0xff << 20)
+#define	MPLL_FUNC_CNTL_1				0x2bb8
+#define		VCO_MODE(x)				((x) << 0)
+#define		VCO_MODE_MASK				(3 << 0)
+#define		CLKFRAC(x)				((x) << 4)
+#define		CLKFRAC_MASK				(0xfff << 4)
+#define		CLKF(x)					((x) << 16)
+#define		CLKF_MASK				(0xfff << 16)
+#define	MPLL_FUNC_CNTL_2				0x2bbc
+#define	MPLL_AD_FUNC_CNTL				0x2bc0
+#define		YCLK_POST_DIV(x)			((x) << 0)
+#define		YCLK_POST_DIV_MASK			(7 << 0)
+#define	MPLL_DQ_FUNC_CNTL				0x2bc4
+#define		YCLK_SEL(x)				((x) << 4)
+#define		YCLK_SEL_MASK				(1 << 4)
+
+#define	MPLL_SS1					0x2bcc
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0x2bd0
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define 	CLOCK_GATING_DIS			(1 << 23)
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+#define HDP_MEM_POWER_LS				0x2F50
+#define 	HDP_LS_ENABLE				(1 << 0)
+
+#define ATC_MISC_CG           				0x3350
+
+#define GMCON_RENG_EXECUTE				0x3508
+#define 	RENG_EXECUTE_ON_PWR_UP			(1 << 0)
+#define GMCON_MISC					0x350c
+#define 	RENG_EXECUTE_ON_REG_UPDATE		(1 << 11)
+#define 	STCTRL_STUTTER_EN			(1 << 16)
+
+#define GMCON_PGFSM_CONFIG				0x3538
+#define GMCON_PGFSM_WRITE				0x353c
+#define GMCON_PGFSM_READ				0x3540
+#define GMCON_MISC3					0x3544
+
+#define MC_SEQ_CNTL_3                                     0x3600
+#       define CAC_EN                                     (1 << 31)
+#define MC_SEQ_G5PDX_CTRL                                 0x3604
+#define MC_SEQ_G5PDX_CTRL_LP                              0x3608
+#define MC_SEQ_G5PDX_CMD0                                 0x360c
+#define MC_SEQ_G5PDX_CMD0_LP                              0x3610
+#define MC_SEQ_G5PDX_CMD1                                 0x3614
+#define MC_SEQ_G5PDX_CMD1_LP                              0x3618
+
+#define MC_SEQ_PMG_DVS_CTL                                0x3628
+#define MC_SEQ_PMG_DVS_CTL_LP                             0x362c
+#define MC_SEQ_PMG_DVS_CMD                                0x3630
+#define MC_SEQ_PMG_DVS_CMD_LP                             0x3634
+#define MC_SEQ_DLL_STBY                                   0x3638
+#define MC_SEQ_DLL_STBY_LP                                0x363c
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
+
+#define	BIF_LNCNT_RESET					0x5220
+#       define RESET_LNCNT_EN                           (1 << 0)
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define GPU_HDP_FLUSH_REQ				0x54DC
+#define GPU_HDP_FLUSH_DONE				0x54E0
+#define		CP0					(1 << 0)
+#define		CP1					(1 << 1)
+#define		CP2					(1 << 2)
+#define		CP3					(1 << 3)
+#define		CP4					(1 << 4)
+#define		CP5					(1 << 5)
+#define		CP6					(1 << 6)
+#define		CP7					(1 << 7)
+#define		CP8					(1 << 8)
+#define		CP9					(1 << 9)
+#define		SDMA0					(1 << 10)
+#define		SDMA1					(1 << 11)
+
+/* 0x6b04, 0x7704, 0x10304, 0x10f04, 0x11b04, 0x12704 */
+#define	LB_MEMORY_CTRL					0x6b04
+#define		LB_MEMORY_SIZE(x)			((x) << 0)
+#define		LB_MEMORY_CONFIG(x)			((x) << 20)
+
+#define	DPG_WATERMARK_MASK_CONTROL			0x6cc8
+#       define LATENCY_WATERMARK_MASK(x)		((x) << 8)
+#define	DPG_PIPE_LATENCY_CONTROL			0x6ccc
+#       define LATENCY_LOW_WATERMARK(x)			((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)		((x) << 16)
+
+/* 0x6b24, 0x7724, 0x10324, 0x10f24, 0x11b24, 0x12724 */
+#define LB_VLINE_STATUS                                 0x6b24
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6b2c, 0x772c, 0x1032c, 0x10f2c, 0x11b2c, 0x1272c */
+#define LB_VBLANK_STATUS                                0x6b2c
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b20, 0x7720, 0x10320, 0x10f20, 0x11b20, 0x12720 */
+#define LB_INTERRUPT_MASK                               0x6b20
+#       define VBLANK_INTERRUPT_MASK                    (1 << 0)
+#       define VLINE_INTERRUPT_MASK                     (1 << 4)
+#       define VLINE2_INTERRUPT_MASK                    (1 << 8)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE6                 0x6780
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL                                0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DAC_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_SENSE_DELAYED                    (1 << 4)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x6cd4
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_MODE             (1 << 1)
+#       define FMT_TRUNCATE_DEPTH(x)         ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH(x)   ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH(x)  ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x8008
+#define		ME0PIPE1_CMDFIFO_AVAIL_MASK			0x0000000F
+#define		ME0PIPE1_CF_RQ_PENDING				(1 << 4)
+#define		ME0PIPE1_PF_RQ_PENDING				(1 << 5)
+#define		ME1PIPE0_RQ_PENDING				(1 << 6)
+#define		ME1PIPE1_RQ_PENDING				(1 << 7)
+#define		ME1PIPE2_RQ_PENDING				(1 << 8)
+#define		ME1PIPE3_RQ_PENDING				(1 << 9)
+#define		ME2PIPE0_RQ_PENDING				(1 << 10)
+#define		ME2PIPE1_RQ_PENDING				(1 << 11)
+#define		ME2PIPE2_RQ_PENDING				(1 << 12)
+#define		ME2PIPE3_RQ_PENDING				(1 << 13)
+#define		RLC_RQ_PENDING 					(1 << 14)
+#define		RLC_BUSY 					(1 << 24)
+#define		TC_BUSY 					(1 << 25)
+#define		CPF_BUSY 					(1 << 28)
+#define		CPC_BUSY 					(1 << 29)
+#define		CPG_BUSY 					(1 << 30)
+
+#define	GRBM_STATUS					0x8010
+#define		ME0PIPE0_CMDFIFO_AVAIL_MASK			0x0000000F
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		ME0PIPE0_CF_RQ_PENDING				(1 << 7)
+#define		ME0PIPE0_PF_RQ_PENDING				(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		WD_BUSY_NO_DMA 					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		WD_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define	GRBM_STATUS_SE2					0x8038
+#define	GRBM_STATUS_SE3					0x803C
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)  /* All CP blocks */
+#define		SOFT_RESET_RLC					(1 << 2)  /* RLC */
+#define		SOFT_RESET_GFX					(1 << 16) /* GFX */
+#define		SOFT_RESET_CPF					(1 << 17) /* CP fetcher shared by gfx and compute */
+#define		SOFT_RESET_CPC					(1 << 18) /* CP Compute (MEC1/2) */
+#define		SOFT_RESET_CPG					(1 << 19) /* CP GFX (PFP, ME, CE) */
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define CP_CPC_STATUS					0x8210
+#define CP_CPC_BUSY_STAT				0x8214
+#define CP_CPC_STALLED_STAT1				0x8218
+#define CP_CPF_STATUS					0x821c
+#define CP_CPF_BUSY_STAT				0x8220
+#define CP_CPF_STALLED_STAT1				0x8224
+
+#define CP_MEC_CNTL					0x8234
+#define		MEC_ME2_HALT					(1 << 28)
+#define		MEC_ME1_HALT					(1 << 30)
+
+#define CP_STALLED_STAT3				0x8670
+#define CP_STALLED_STAT1				0x8674
+#define CP_STALLED_STAT2				0x8678
+
+#define CP_STAT						0x8680
+
+#define CP_ME_CNTL					0x86D8
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_RB_WPTR_POLL_CNTL				0x8708
+#define		IDLE_POLL_COUNT(x)			((x) << 16)
+#define		IDLE_POLL_COUNT_MASK			(0xffff << 16)
+
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	VGT_VTX_VECT_EJECT_REG				0x88B0
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
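+
+/*
+ * Editor's illustrative sketch, not part of the original file: a
+ * VGT_CACHE_INVALIDATION value that invalidates both the vertex and texture
+ * caches and auto-invalidates for both ES and GS, using only the field
+ * macros above. The helper name is hypothetical.
+ */
+static inline u32 example_vgt_cache_invalidation(void)
+{
+	return CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO);
+}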
+
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x8BF0
+#define		ENABLE_PA_SC_OUT_OF_ORDER			(1 << 0)
+#define		DISABLE_PA_SC_GUIDANCE				(1 << 13)
+
+#define	SQ_CONFIG					0x8C00
+
+#define	SH_MEM_BASES					0x8C28
+/* if PTR32, these are the bases for scratch and lds */
+#define		PRIVATE_BASE(x)					((x) << 0) /* scratch */
+#define		SHARED_BASE(x)					((x) << 16) /* LDS */
+#define	SH_MEM_APE1_BASE				0x8C2C
+/* if PTR32, this is the base location of GPUVM */
+#define	SH_MEM_APE1_LIMIT				0x8C30
+/* if PTR32, this is the upper limit of GPUVM */
+#define	SH_MEM_CONFIG					0x8C34
+#define		PTR32						(1 << 0)
+#define		ALIGNMENT_MODE(x)				((x) << 2)
+#define			SH_MEM_ALIGNMENT_MODE_DWORD			0
+#define			SH_MEM_ALIGNMENT_MODE_DWORD_STRICT		1
+#define			SH_MEM_ALIGNMENT_MODE_STRICT			2
+#define			SH_MEM_ALIGNMENT_MODE_UNALIGNED			3
+#define		DEFAULT_MTYPE(x)				((x) << 4)
+#define		APE1_MTYPE(x)					((x) << 7)
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define	MTYPE_CACHED					0
+#define	MTYPE_NONCACHED					3
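+
+/*
+ * Editor's illustrative sketch, not part of the original file: composing an
+ * SH_MEM_CONFIG value for 32-bit pointers with unaligned access mode, a
+ * cached default MTYPE and a noncached APE1 MTYPE, from the fields above.
+ * The helper name is hypothetical.
+ */
+static inline u32 example_sh_mem_config_ptr32(void)
+{
+	return PTR32 |
+	       ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+	       DEFAULT_MTYPE(MTYPE_CACHED) |
+	       APE1_MTYPE(MTYPE_NONCACHED);
+}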
+
+#define	SX_DEBUG_1					0x9060
+
+#define	SPI_CONFIG_CNTL					0x9100
+
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	TA_CNTL_AUX					0x9508
+
+#define DB_DEBUG					0x9830
+#define DB_DEBUG2					0x9834
+#define DB_DEBUG3					0x9838
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
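+
+/*
+ * Editor's illustrative sketch, not part of the original file: pulling the
+ * pipe-count field out of a raw GB_ADDR_CONFIG value with the mask/shift
+ * pairs above. The helper name is hypothetical.
+ */
+static inline u32 example_gb_addr_config_num_pipes(u32 gb_addr_config)
+{
+	return (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+}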
+
+#define	GB_TILE_MODE0					0x9910
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#              define	ARRAY_PRT_TILED_THIN1			5
+#              define	ARRAY_PRT_2D_TILED_THIN1		6
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#              define	ADDR_SURF_P16_32x32_8x16		16
+#              define	ADDR_SURF_P16_32x32_16x16		17
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define MICRO_TILE_MODE_NEW(x)				((x) << 22)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#              define	ADDR_SURF_ROTATED_MICRO_TILING		3
+#       define SAMPLE_SPLIT(x)					((x) << 25)
+#              define	ADDR_SURF_SAMPLE_SPLIT_1		0
+#              define	ADDR_SURF_SAMPLE_SPLIT_2		1
+#              define	ADDR_SURF_SAMPLE_SPLIT_4		2
+#              define	ADDR_SURF_SAMPLE_SPLIT_8		3
+
+#define	GB_MACROTILE_MODE0					0x9990
+#       define BANK_WIDTH(x)					((x) << 0)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 2)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 4)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 6)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
+
+#define	CB_HW_CONTROL					0x9A10
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0xac0c
+#define	TCP_CHAN_STEER_HI				0xac10
+
+#define	TC_CFG_L1_LOAD_POLICY0				0xAC68
+#define	TC_CFG_L1_LOAD_POLICY1				0xAC6C
+#define	TC_CFG_L1_STORE_POLICY				0xAC70
+#define	TC_CFG_L2_LOAD_POLICY0				0xAC74
+#define	TC_CFG_L2_LOAD_POLICY1				0xAC78
+#define	TC_CFG_L2_STORE_POLICY0				0xAC7C
+#define	TC_CFG_L2_STORE_POLICY1				0xAC80
+#define	TC_CFG_L2_ATOMIC_POLICY				0xAC84
+#define	TC_CFG_L1_VOLATILE				0xAC88
+#define	TC_CFG_L2_VOLATILE				0xAC8C
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
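+
+/*
+ * Editor's illustrative sketch, not part of the original file: a CP_RB0_CNTL
+ * value with read-pointer writeback suppressed; the buffer-size argument is
+ * assumed to be a log2 encoding, as is usual for RB size fields. The helper
+ * name is hypothetical.
+ */
+static inline u32 example_cp_rb0_cntl(u32 log2_bufsz)
+{
+	return RB_BUFSZ(log2_bufsz) | RB_NO_UPDATE;
+}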
+
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define		RB_RPTR_SWAP_32BIT				(2 << 0)
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define	CP_DEVICE_ID					0xC12C
+#define	CP_ENDIAN_SWAP					0xC140
+#define	CP_RB_VMID					0xC144
+
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+
+#define	CP_CE_UCODE_ADDR				0xC168
+#define	CP_CE_UCODE_DATA				0xC16C
+#define	CP_MEC_ME1_UCODE_ADDR				0xC170
+#define	CP_MEC_ME1_UCODE_DATA				0xC174
+#define	CP_MEC_ME2_UCODE_ADDR				0xC178
+#define	CP_MEC_ME2_UCODE_DATA				0xC17C
+
+#define CP_INT_CNTL_RING0                               0xC1A8
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define PRIV_INSTR_INT_ENABLE                    (1 << 22)
+#       define PRIV_REG_INT_ENABLE                      (1 << 23)
+#       define OPCODE_ERROR_INT_ENABLE                  (1 << 24)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+
+#define CP_INT_STATUS_RING0                             0xC1B4
+#       define PRIV_INSTR_INT_STAT                      (1 << 22)
+#       define PRIV_REG_INT_STAT                        (1 << 23)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define CP_MEM_SLP_CNTL                                 0xC1E4
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define CP_CPF_DEBUG                                    0xC200
+
+#define CP_PQ_WPTR_POLL_CNTL                            0xC20C
+#define		WPTR_POLL_EN      			(1 << 31)
+
+#define CP_ME1_PIPE0_INT_CNTL                           0xC214
+#define CP_ME1_PIPE1_INT_CNTL                           0xC218
+#define CP_ME1_PIPE2_INT_CNTL                           0xC21C
+#define CP_ME1_PIPE3_INT_CNTL                           0xC220
+#define CP_ME2_PIPE0_INT_CNTL                           0xC224
+#define CP_ME2_PIPE1_INT_CNTL                           0xC228
+#define CP_ME2_PIPE2_INT_CNTL                           0xC22C
+#define CP_ME2_PIPE3_INT_CNTL                           0xC230
+#       define DEQUEUE_REQUEST_INT_ENABLE               (1 << 13)
+#       define WRM_POLL_TIMEOUT_INT_ENABLE              (1 << 17)
+#       define PRIV_REG_INT_ENABLE                      (1 << 23)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define GENERIC2_INT_ENABLE                      (1 << 29)
+#       define GENERIC1_INT_ENABLE                      (1 << 30)
+#       define GENERIC0_INT_ENABLE                      (1 << 31)
+#define CP_ME1_PIPE0_INT_STATUS                         0xC214
+#define CP_ME1_PIPE1_INT_STATUS                         0xC218
+#define CP_ME1_PIPE2_INT_STATUS                         0xC21C
+#define CP_ME1_PIPE3_INT_STATUS                         0xC220
+#define CP_ME2_PIPE0_INT_STATUS                         0xC224
+#define CP_ME2_PIPE1_INT_STATUS                         0xC228
+#define CP_ME2_PIPE2_INT_STATUS                         0xC22C
+#define CP_ME2_PIPE3_INT_STATUS                         0xC230
+#       define DEQUEUE_REQUEST_INT_STATUS               (1 << 13)
+#       define WRM_POLL_TIMEOUT_INT_STATUS              (1 << 17)
+#       define PRIV_REG_INT_STATUS                      (1 << 23)
+#       define TIME_STAMP_INT_STATUS                    (1 << 26)
+#       define GENERIC2_INT_STATUS                      (1 << 29)
+#       define GENERIC1_INT_STATUS                      (1 << 30)
+#       define GENERIC0_INT_STATUS                      (1 << 31)
+
+#define	CP_MAX_CONTEXT					0xC2B8
+
+#define	CP_RB0_BASE_HI					0xC2C4
+
+#define RLC_CNTL                                          0xC300
+#       define RLC_ENABLE                                 (1 << 0)
+
+#define RLC_MC_CNTL                                       0xC30C
+
+#define RLC_MEM_SLP_CNTL                                  0xC318
+#       define RLC_MEM_LS_EN                              (1 << 0)
+
+#define RLC_LB_CNTR_MAX                                   0xC348
+
+#define RLC_LB_CNTL                                       0xC364
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+
+#define RLC_LB_CNTR_INIT                                  0xC36C
+
+#define RLC_SAVE_AND_RESTORE_BASE                         0xC374
+#define RLC_DRIVER_DMA_STATUS                             0xC378 /* dGPU */
+#define RLC_CP_TABLE_RESTORE                              0xC378 /* APU */
+#define RLC_PG_DELAY_2                                    0xC37C
+
+#define RLC_GPM_UCODE_ADDR                                0xC388
+#define RLC_GPM_UCODE_DATA                                0xC38C
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC390
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC394
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC398
+#define RLC_UCODE_CNTL                                    0xC39C
+
+#define RLC_GPM_STAT                                      0xC400
+#       define RLC_GPM_BUSY                               (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+
+#define RLC_PG_CNTL                                       0xC40C
+#       define GFX_PG_ENABLE                              (1 << 0)
+#       define GFX_PG_SRC                                 (1 << 1)
+#       define DYN_PER_CU_PG_ENABLE                       (1 << 2)
+#       define STATIC_PER_CU_PG_ENABLE                    (1 << 3)
+#       define DISABLE_GDS_PG                             (1 << 13)
+#       define DISABLE_CP_PG                              (1 << 15)
+#       define SMU_CLK_SLOWDOWN_ON_PU_ENABLE              (1 << 17)
+#       define SMU_CLK_SLOWDOWN_ON_PD_ENABLE              (1 << 18)
+
+#define RLC_CGTT_MGCG_OVERRIDE                            0xC420
+#define RLC_CGCG_CGLS_CTRL                                0xC424
+#       define CGCG_EN                                    (1 << 0)
+#       define CGLS_EN                                    (1 << 1)
+
+#define RLC_PG_DELAY                                      0xC434
+
+#define RLC_LB_INIT_CU_MASK                               0xC43C
+
+#define RLC_LB_PARAMS                                     0xC444
+
+#define RLC_PG_AO_CU_MASK                                 0xC44C
+
+#define	RLC_MAX_PG_CU					0xC450
+#	define MAX_PU_CU(x)				((x) << 0)
+#	define MAX_PU_CU_MASK				(0xff << 0)
+#define RLC_AUTO_PG_CTRL                                  0xC454
+#       define AUTO_PG_EN                                 (1 << 0)
+#	define GRBM_REG_SGIT(x)				((x) << 3)
+#	define GRBM_REG_SGIT_MASK			(0xffff << 3)
+
+#define RLC_SERDES_WR_CU_MASTER_MASK                      0xC474
+#define RLC_SERDES_WR_NONCU_MASTER_MASK                   0xC478
+#define RLC_SERDES_WR_CTRL                                0xC47C
+#define		BPM_ADDR(x)				((x) << 0)
+#define		BPM_ADDR_MASK      			(0xff << 0)
+#define		CGLS_ENABLE				(1 << 16)
+#define		CGCG_OVERRIDE_0				(1 << 20)
+#define		MGCG_OVERRIDE_0				(1 << 22)
+#define		MGCG_OVERRIDE_1				(1 << 23)
+
+#define RLC_SERDES_CU_MASTER_BUSY                         0xC484
+#define RLC_SERDES_NONCU_MASTER_BUSY                      0xC488
+#       define SE_MASTER_BUSY_MASK                        0x0000ffff
+#       define GC_MASTER_BUSY                             (1 << 16)
+#       define TC0_MASTER_BUSY                            (1 << 17)
+#       define TC1_MASTER_BUSY                            (1 << 18)
+
+#define RLC_GPM_SCRATCH_ADDR                              0xC4B0
+#define RLC_GPM_SCRATCH_DATA                              0xC4B4
+
+#define RLC_GPR_REG2                                      0xC4E8
+#define		REQ      				0x00000001
+#define		MESSAGE(x)      			((x) << 1)
+#define		MESSAGE_MASK      			0x0000001e
+#define		MSG_ENTER_RLC_SAFE_MODE      			1
+#define		MSG_EXIT_RLC_SAFE_MODE      			0
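+
+/*
+ * Editor's illustrative sketch, not part of the original file: the
+ * RLC_GPR_REG2 value that requests entry into RLC safe mode via the
+ * REQ/MESSAGE fields above. The helper name is hypothetical.
+ */
+static inline u32 example_rlc_enter_safe_mode(void)
+{
+	return REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
+}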
+
+#define CP_HPD_EOP_BASE_ADDR                              0xC904
+#define CP_HPD_EOP_BASE_ADDR_HI                           0xC908
+#define CP_HPD_EOP_VMID                                   0xC90C
+#define CP_HPD_EOP_CONTROL                                0xC910
+#define		EOP_SIZE(x)				((x) << 0)
+#define		EOP_SIZE_MASK      			(0x3f << 0)
+#define CP_MQD_BASE_ADDR                                  0xC914
+#define CP_MQD_BASE_ADDR_HI                               0xC918
+#define CP_HQD_ACTIVE                                     0xC91C
+#define CP_HQD_VMID                                       0xC920
+
+#define CP_HQD_PERSISTENT_STATE				0xC924u
+#define	DEFAULT_CP_HQD_PERSISTENT_STATE			(0x33U << 8)
+
+#define CP_HQD_PIPE_PRIORITY				0xC928u
+#define CP_HQD_QUEUE_PRIORITY				0xC92Cu
+#define CP_HQD_QUANTUM					0xC930u
+#define	QUANTUM_EN					1U
+#define	QUANTUM_SCALE_1MS				(1U << 4)
+#define	QUANTUM_DURATION(x)				((x) << 8)
+
+#define CP_HQD_PQ_BASE                                    0xC934
+#define CP_HQD_PQ_BASE_HI                                 0xC938
+#define CP_HQD_PQ_RPTR                                    0xC93C
+#define CP_HQD_PQ_RPTR_REPORT_ADDR                        0xC940
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI                     0xC944
+#define CP_HQD_PQ_WPTR_POLL_ADDR                          0xC948
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI                       0xC94C
+#define CP_HQD_PQ_DOORBELL_CONTROL                        0xC950
+#define		DOORBELL_OFFSET(x)			((x) << 2)
+#define		DOORBELL_OFFSET_MASK			(0x1fffff << 2)
+#define		DOORBELL_SOURCE      			(1 << 28)
+#define		DOORBELL_SCHD_HIT      			(1 << 29)
+#define		DOORBELL_EN      			(1 << 30)
+#define		DOORBELL_HIT      			(1 << 31)
+#define CP_HQD_PQ_WPTR                                    0xC954
+#define CP_HQD_PQ_CONTROL                                 0xC958
+#define		QUEUE_SIZE(x)				((x) << 0)
+#define		QUEUE_SIZE_MASK      			(0x3f << 0)
+#define		RPTR_BLOCK_SIZE(x)			((x) << 8)
+#define		RPTR_BLOCK_SIZE_MASK			(0x3f << 8)
+#define		PQ_VOLATILE      			(1 << 26)
+#define		NO_UPDATE_RPTR      			(1 << 27)
+#define		UNORD_DISPATCH      			(1 << 28)
+#define		ROQ_PQ_IB_FLIP      			(1 << 29)
+#define		PRIV_STATE      			(1 << 30)
+#define		KMD_QUEUE      				(1 << 31)
+
+#define CP_HQD_IB_BASE_ADDR				0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI			0xC960u
+#define CP_HQD_IB_RPTR					0xC964u
+#define CP_HQD_IB_CONTROL				0xC968u
+#define	IB_ATC_EN					(1U << 23)
+#define	DEFAULT_MIN_IB_AVAIL_SIZE			(3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST			0xC974
+#define	DEQUEUE_REQUEST_DRAIN				1
+#define DEQUEUE_REQUEST_RESET				2
+
+#define CP_MQD_CONTROL                                  0xC99C
+#define		MQD_VMID(x)				((x) << 0)
+#define		MQD_VMID_MASK      			(0xf << 0)
+
+#define CP_HQD_SEMA_CMD					0xC97Cu
+#define CP_HQD_MSG_TYPE					0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO			0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI			0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO			0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI			0xC990u
+#define CP_HQD_HQ_SCHEDULER0			0xC994u
+#define CP_HQD_HQ_SCHEDULER1			0xC998u
+
+#define SH_STATIC_MEM_CONFIG			0x9604u
+
+#define DB_RENDER_CONTROL                               0x28000
+
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+#define		PKR_MAP(x)				((x) << 8)
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SO_VGT_STREAMOUT_FLUSH                   (31 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+#       define PIXEL_PIPE_STAT_CONTROL                  (56 << 0)
+#       define PIXEL_PIPE_STAT_DUMP                     (57 << 0)
+#       define PIXEL_PIPE_STAT_RESET                    (58 << 0)
+
+#define	SCRATCH_REG0					0x30100
+#define	SCRATCH_REG1					0x30104
+#define	SCRATCH_REG2					0x30108
+#define	SCRATCH_REG3					0x3010C
+#define	SCRATCH_REG4					0x30110
+#define	SCRATCH_REG5					0x30114
+#define	SCRATCH_REG6					0x30118
+#define	SCRATCH_REG7					0x3011C
+
+#define	SCRATCH_UMSK					0x30140
+#define	SCRATCH_ADDR					0x30144
+
+#define	CP_SEM_WAIT_TIMER				0x301BC
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x301C8
+
+#define	CP_WAIT_REG_MEM_TIMEOUT				0x301D0
+
+#define GRBM_GFX_INDEX          			0x30800
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
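+
+/*
+ * Editor's illustrative sketch, not part of the original file: a
+ * GRBM_GFX_INDEX value that steers subsequent indexed register accesses at
+ * one shader engine / shader array while broadcasting across instances.
+ * The helper name is hypothetical.
+ */
+static inline u32 example_grbm_gfx_index(u32 se, u32 sh)
+{
+	return INSTANCE_BROADCAST_WRITES | SE_INDEX(se) | SH_INDEX(sh);
+}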
+
+#define	VGT_ESGS_RING_SIZE				0x30900
+#define	VGT_GSVS_RING_SIZE				0x30904
+#define	VGT_PRIMITIVE_TYPE				0x30908
+#define	VGT_INDEX_TYPE					0x3090C
+
+#define	VGT_NUM_INDICES					0x30930
+#define	VGT_NUM_INSTANCES				0x30934
+#define	VGT_TF_RING_SIZE				0x30938
+#define	VGT_HS_OFFCHIP_PARAM				0x3093C
+#define	VGT_TF_MEMORY_BASE				0x30940
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x30a00
+#define	PA_SC_LINE_STIPPLE_STATE			0x30a04
+
+#define	SQC_CACHES					0x30d20
+
+#define	CP_PERFMON_CNTL					0x36020
+
+#define	CGTS_SM_CTRL_REG				0x3c000
+#define		SM_MODE(x)				((x) << 17)
+#define		SM_MODE_MASK				(0x7 << 17)
+#define		SM_MODE_ENABLE				(1 << 20)
+#define		CGTS_OVERRIDE				(1 << 21)
+#define		CGTS_LS_OVERRIDE			(1 << 22)
+#define		ON_MONITOR_ADD_EN			(1 << 23)
+#define		ON_MONITOR_ADD(x)			((x) << 24)
+#define		ON_MONITOR_ADD_MASK			(0xff << 24)
+
+#define	CGTS_TCC_DISABLE				0x3c00c
+#define	CGTS_USER_TCC_DISABLE				0x3c010
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+
+#define	CB_CGTT_SCLK_CTRL				0x3c2a0
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
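+
+/*
+ * Editor's illustrative sketch, not part of the original file: a type-3
+ * header built with PACKET3() round-trips through the CP_PACKET_GET_* and
+ * CP_PACKET3_GET_OPCODE decoders above (0x10 is the NOP opcode defined
+ * below). The helper name is hypothetical.
+ */
+static inline bool example_packet3_roundtrip(void)
+{
+	u32 header = PACKET3(0x10, 2);	/* NOP with two payload dwords */
+
+	return CP_PACKET_GET_TYPE(header) == PACKET_TYPE3 &&
+	       CP_PACKET3_GET_OPCODE(header) == 0x10 &&
+	       CP_PACKET_GET_COUNT(header) == 2;
+}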
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC_MEM				0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x33
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_PREAMBLE				0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - gl2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_CACHE_POLICY(x)              ((x) << 25)
+                /* 0 - LRU
+		 * 1 - Stream
+		 */
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
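+
+/*
+ * Editor's illustrative sketch, not part of the original file: the control
+ * dword of a WRITE_DATA packet that targets memory via the async path and
+ * asks for write confirmation, issued from the ME. The helper name is
+ * hypothetical.
+ */
+static inline u32 example_write_data_cntl(void)
+{
+	return WRITE_DATA_DST_SEL(5) |		/* memory (async - direct) */
+	       WR_CONFIRM |
+	       WRITE_DATA_ENGINE_SEL(0);	/* me */
+}
+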
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#              define PACKET3_SEM_USE_MAILBOX       (0x1 << 16)
+#              define PACKET3_SEM_SEL_SIGNAL_TYPE   (0x1 << 20) /* 0 = increment, 1 = write 1 */
+#              define PACKET3_SEM_CLIENT_CODE(x)    ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
+#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_OPERATION(x)               ((x) << 6)
+                /* 0 - wait_reg_mem
+		 * 1 - wr_wait_wr_reg
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+		 * 1 - pfp
+		 */
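+
+/*
+ * Editor's illustrative sketch, not part of the original file: the control
+ * dword of a WAIT_REG_MEM packet that polls a memory location on the ME
+ * until it equals a reference value. The helper name is hypothetical.
+ */
+static inline u32 example_wait_mem_equal_cntl(void)
+{
+	return WAIT_REG_MEM_FUNCTION(3) |	/* == */
+	       WAIT_REG_MEM_MEM_SPACE(1) |	/* mem */
+	       WAIT_REG_MEM_ENGINE(0);		/* me */
+}
+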
+#define	PACKET3_INDIRECT_BUFFER				0x3F
+#define		INDIRECT_BUFFER_TCL2_VOLATILE           (1 << 22)
+#define		INDIRECT_BUFFER_VALID                   (1 << 23)
+#define		INDIRECT_BUFFER_CACHE_POLICY(x)         ((x) << 28)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_TCL1_VOL_ACTION_ENA  (1 << 15)
+#              define PACKET3_TC_VOL_ACTION_ENA    (1 << 16) /* L2 */
+#              define PACKET3_TC_WB_ACTION_ENA     (1 << 18) /* L2 */
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23) /* L2 */
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		EOP_TCL1_VOL_ACTION_EN                  (1 << 12)
+#define		EOP_TC_VOL_ACTION_EN                    (1 << 13) /* L2 */
+#define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
+#define		EOP_TCL1_ACTION_EN                      (1 << 16)
+#define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define		EOP_TCL2_VOLATILE                       (1 << 24)
+#define		EOP_CACHE_POLICY(x)                     ((x) << 25)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit GPU counter value
+		 * 4 - send 64bit sys counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define		DST_SEL(x)                              ((x) << 16)
+                /* 0 - MC
+		 * 1 - TC/L2
+		 */
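+
+/*
+ * Editor's illustrative sketch, not part of the original file: the event
+ * dword of an EVENT_WRITE_EOP fence that flushes and invalidates the TC
+ * caches (EVENT_INDEX 5 selects EOP events per the comment above), and the
+ * companion DATA_SEL/INT_SEL bits for sending 64-bit data with an interrupt
+ * once the write is confirmed. The helper names are hypothetical.
+ */
+static inline u32 example_eop_event_cntl(void)
+{
+	return EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
+	       EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+	       EVENT_INDEX(5);
+}
+
+static inline u32 example_eop_data_cntl(void)
+{
+	return DATA_SEL(2) | INT_SEL(2);	/* 64-bit data, int on confirm */
+}
+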
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_RELEASE_MEM				0x49
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_DMA_DATA				0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
+                /* 0 - DST_ADDR using DAS
+		 * 1 - GDS
+		 * 3 - DST_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
+                /* 0 - SRC_ADDR using SAS
+		 * 1 - GDS
+		 * 2 - DATA
+		 * 3 - SRC_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
+/* COMMAND */
+#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
+#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
+#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
+#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
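+
+/*
+ * Editor's illustrative sketch, not part of the original file: the CONTROL
+ * and COMMAND dwords of a DMA_DATA copy between two L2-routed addresses on
+ * the ME, with a raw wait; BYTE_COUNT occupies bits [20:0] per the layout
+ * comment above. The helper names are hypothetical.
+ */
+static inline u32 example_dma_data_control(void)
+{
+	return PACKET3_DMA_DATA_ENGINE(0) |	/* ME */
+	       PACKET3_DMA_DATA_SRC_SEL(3) |	/* SRC_ADDR using L2 */
+	       PACKET3_DMA_DATA_DST_SEL(3);	/* DST_ADDR using L2 */
+}
+
+static inline u32 example_dma_data_command(u32 byte_count)
+{
+	return (byte_count & 0x1fffff) | PACKET3_DMA_DATA_CMD_RAW_WAIT;
+}
+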
+#define	PACKET3_AQUIRE_MEM				0x58 /* sic: "ACQUIRE" is misspelled in the identifier */
+#define	PACKET3_REWIND					0x59
+#define	PACKET3_LOAD_UCONFIG_REG			0x5E
+#define	PACKET3_LOAD_SH_REG				0x5F
+#define	PACKET3_LOAD_CONFIG_REG				0x60
+#define	PACKET3_LOAD_CONTEXT_REG			0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000b000
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x0000b000
+#define		PACKET3_SET_SH_REG_END				0x0000c000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_SET_QUEUE_REG				0x78
+#define	PACKET3_SET_UCONFIG_REG				0x79
+#define		PACKET3_SET_UCONFIG_REG_START			0x00030000
+#define		PACKET3_SET_UCONFIG_REG_END			0x00031000
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* SDMA - first instance at 0xd000, second at 0xd800 */
+#define SDMA0_REGISTER_OFFSET                             0x0 /* not a register */
+#define SDMA1_REGISTER_OFFSET                             0x800 /* not a register */
+
+#define	SDMA0_UCODE_ADDR                                  0xD000
+#define	SDMA0_UCODE_DATA                                  0xD004
+#define	SDMA0_POWER_CNTL                                  0xD008
+#define	SDMA0_CLK_CTRL                                    0xD00C
+
+#define SDMA0_CNTL                                        0xD010
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define AUTO_CTXSW_ENABLE                          (1 << 18)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+
+#define SDMA0_TILING_CONFIG  				  0xD018
+
+#define SDMA0_SEM_INCOMPLETE_TIMER_CNTL                   0xD020
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL                    0xD024
+
+#define SDMA0_STATUS_REG                                  0xd034
+#       define SDMA_IDLE                                  (1 << 0)
+
+#define SDMA0_ME_CNTL                                     0xD048
+#       define SDMA_HALT                                  (1 << 0)
+
+#define SDMA0_GFX_RB_CNTL                                 0xD200
+#       define SDMA_RB_ENABLE                             (1 << 0)
+#       define SDMA_RB_SIZE(x)                            ((x) << 1) /* log2 */
+#       define SDMA_RB_SWAP_ENABLE                        (1 << 9) /* 8IN32 */
+#       define SDMA_RPTR_WRITEBACK_ENABLE                 (1 << 12)
+#       define SDMA_RPTR_WRITEBACK_SWAP_ENABLE            (1 << 13)  /* 8IN32 */
+#       define SDMA_RPTR_WRITEBACK_TIMER(x)               ((x) << 16) /* log2 */
+#define SDMA0_GFX_RB_BASE                                 0xD204
+#define SDMA0_GFX_RB_BASE_HI                              0xD208
+#define SDMA0_GFX_RB_RPTR                                 0xD20C
+#define SDMA0_GFX_RB_WPTR                                 0xD210
+
+#define SDMA0_GFX_RB_RPTR_ADDR_HI                         0xD220
+#define SDMA0_GFX_RB_RPTR_ADDR_LO                         0xD224
+#define SDMA0_GFX_IB_CNTL                                 0xD228
+#       define SDMA_IB_ENABLE                             (1 << 0)
+#       define SDMA_IB_SWAP_ENABLE                        (1 << 4)
+#       define SDMA_SWITCH_INSIDE_IB                      (1 << 8)
+#       define SDMA_CMD_VMID(x)                           ((x) << 16)
+
+#define SDMA0_GFX_VIRTUAL_ADDR                            0xD29C
+#define SDMA0_GFX_APE1_CNTL                               0xD2A0
+
+#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
+					 (((sub_op) & 0xFF) << 8) |	\
+					 (((op) & 0xFF) << 0))
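+
+/*
+ * Editor's illustrative sketch, not part of the original file: the header
+ * dword of a linear sDMA copy built with SDMA_PACKET() above; 1 and 0 are
+ * the COPY opcode and LINEAR sub-opcode listed below. The helper name is
+ * hypothetical.
+ */
+static inline u32 example_sdma_linear_copy_header(void)
+{
+	return SDMA_PACKET(1, 0, 0);	/* COPY, LINEAR, no extra bits */
+}
+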
+/* sDMA opcodes */
+#define	SDMA_OPCODE_NOP					  0
+#define	SDMA_OPCODE_COPY				  1
+#       define SDMA_COPY_SUB_OPCODE_LINEAR                0
+#       define SDMA_COPY_SUB_OPCODE_TILED                 1
+#       define SDMA_COPY_SUB_OPCODE_SOA                   3
+#       define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW     4
+#       define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW      5
+#       define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW        6
+#define	SDMA_OPCODE_WRITE				  2
+#       define SDMA_WRITE_SUB_OPCODE_LINEAR               0
+#       define SDMA_WRITE_SUB_OPCODE_TILED                1
+#define	SDMA_OPCODE_INDIRECT_BUFFER			  4
+#define	SDMA_OPCODE_FENCE				  5
+#define	SDMA_OPCODE_TRAP				  6
+#define	SDMA_OPCODE_SEMAPHORE				  7
+#       define SDMA_SEMAPHORE_EXTRA_O                     (1 << 13)
+                /* 0 - increment
+		 * 1 - write 1
+		 */
+#       define SDMA_SEMAPHORE_EXTRA_S                     (1 << 14)
+                /* 0 - wait
+		 * 1 - signal
+		 */
+#       define SDMA_SEMAPHORE_EXTRA_M                     (1 << 15)
+                /* mailbox */
+#define	SDMA_OPCODE_POLL_REG_MEM			  8
+#       define SDMA_POLL_REG_MEM_EXTRA_OP(x)              ((x) << 10)
+                /* 0 - wait_reg_mem
+		 * 1 - wr_wait_wr_reg
+		 */
+#       define SDMA_POLL_REG_MEM_EXTRA_FUNC(x)            ((x) << 12)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#       define SDMA_POLL_REG_MEM_EXTRA_M                  (1 << 15)
+                /* 0 = register
+		 * 1 = memory
+		 */
+#define	SDMA_OPCODE_COND_EXEC				  9
+#define	SDMA_OPCODE_CONSTANT_FILL			  11
+#       define SDMA_CONSTANT_FILL_EXTRA_SIZE(x)           ((x) << 14)
+                /* 0 = byte fill
+		 * 2 = DW fill
+		 */
+#define	SDMA_OPCODE_GENERATE_PTE_PDE			  12
+#define	SDMA_OPCODE_TIMESTAMP				  13
+#       define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL        0
+#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL        1
+#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL       2
+#define	SDMA_OPCODE_SRBM_WRITE				  14
+#       define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x)       ((x) << 12)
+                /* byte mask */
+
+/* UVD */
+
+#define UVD_UDEC_ADDR_CONFIG		0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG		0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG	0xef54
+#define UVD_NO_OP			0xeffc
+
+#define UVD_LMI_EXT40_ADDR		0xf498
+#define UVD_GP_SCRATCH4			0xf4e0
+#define UVD_LMI_ADDR_EXT		0xf594
+#define UVD_VCPU_CACHE_OFFSET0		0xf608
+#define UVD_VCPU_CACHE_SIZE0		0xf60c
+#define UVD_VCPU_CACHE_OFFSET1		0xf610
+#define UVD_VCPU_CACHE_SIZE1		0xf614
+#define UVD_VCPU_CACHE_OFFSET2		0xf618
+#define UVD_VCPU_CACHE_SIZE2		0xf61c
+
+#define UVD_RBC_RB_RPTR			0xf690
+#define UVD_RBC_RB_WPTR			0xf694
+
+#define	UVD_CGC_CTRL					0xF4B0
+#	define DCM					(1 << 0)
+#	define CG_DT(x)					((x) << 2)
+#	define CG_DT_MASK				(0xf << 2)
+#	define CLK_OD(x)				((x) << 6)
+#	define CLK_OD_MASK				(0x1f << 6)
+
+#define UVD_STATUS					0xf6bc
+
+/* UVD clocks */
+
+#define CG_DCLK_CNTL			0xC050009C
+#	define DCLK_DIVIDER_MASK	0x7f
+#	define DCLK_DIR_CNTL_EN		(1 << 8)
+#define CG_DCLK_STATUS			0xC05000A0
+#	define DCLK_STATUS		(1 << 0)
+#define CG_VCLK_CNTL			0xC05000A4
+#define CG_VCLK_STATUS			0xC05000A8
+
+/* UVD CTX indirect */
+#define	UVD_CGC_MEM_CTRL				0xC0
+
+/* VCE */
+
+#define VCE_VCPU_CACHE_OFFSET0		0x20024
+#define VCE_VCPU_CACHE_SIZE0		0x20028
+#define VCE_VCPU_CACHE_OFFSET1		0x2002c
+#define VCE_VCPU_CACHE_SIZE1		0x20030
+#define VCE_VCPU_CACHE_OFFSET2		0x20034
+#define VCE_VCPU_CACHE_SIZE2		0x20038
+#define VCE_RB_RPTR2			0x20178
+#define VCE_RB_WPTR2			0x2017c
+#define VCE_RB_RPTR			0x2018c
+#define VCE_RB_WPTR			0x20190
+#define VCE_CLOCK_GATING_A		0x202f8
+#	define CGC_CLK_GATE_DLY_TIMER_MASK	(0xf << 0)
+#	define CGC_CLK_GATE_DLY_TIMER(x)	((x) << 0)
+#	define CGC_CLK_GATER_OFF_DLY_TIMER_MASK	(0xff << 4)
+#	define CGC_CLK_GATER_OFF_DLY_TIMER(x)	((x) << 4)
+#	define CGC_UENC_WAIT_AWAKE	(1 << 18)
+#define VCE_CLOCK_GATING_B		0x202fc
+#define VCE_CGTT_CLK_OVERRIDE		0x207a0
+#define VCE_UENC_CLOCK_GATING		0x207bc
+#	define CLOCK_ON_DELAY_MASK	(0xf << 0)
+#	define CLOCK_ON_DELAY(x)	((x) << 0)
+#	define CLOCK_OFF_DELAY_MASK	(0xff << 4)
+#	define CLOCK_OFF_DELAY(x)	((x) << 4)
+#define VCE_UENC_REG_CLOCK_GATING	0x207c0
+#define VCE_SYS_INT_EN			0x21300
+#	define VCE_SYS_INT_TRAP_INTERRUPT_EN	(1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR	0x2145c
+#define VCE_LMI_CTRL2			0x21474
+#define VCE_LMI_CTRL			0x21498
+#define VCE_LMI_VM_CTRL			0x214a0
+#define VCE_LMI_SWAP_CNTL		0x214b4
+#define VCE_LMI_SWAP_CNTL1		0x214b8
+#define VCE_LMI_CACHE_CTRL		0x214f4
+
+#define VCE_CMD_NO_OP		0x00000000
+#define VCE_CMD_END		0x00000001
+#define VCE_CMD_IB		0x00000002
+#define VCE_CMD_FENCE		0x00000003
+#define VCE_CMD_TRAP		0x00000004
+#define VCE_CMD_IB_AUTO		0x00000005
+#define VCE_CMD_SEMAPHORE	0x00000006
+
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS		0x3398u
+#define ATC_VMID0_PASID_MAPPING				0x339Cu
+#define ATC_VMID_PASID_MAPPING_PASID_MASK		(0xFFFF)
+#define ATC_VMID_PASID_MAPPING_PASID_SHIFT		0
+#define ATC_VMID_PASID_MAPPING_VALID_MASK		(0x1 << 31)
+#define ATC_VMID_PASID_MAPPING_VALID_SHIFT		31
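+
+/*
+ * Editor's illustrative sketch, not part of the original file: a valid
+ * ATC_VMID0_PASID_MAPPING value for a given PASID, built from the
+ * mask/shift pairs above. The helper name is hypothetical.
+ */
+static inline u32 example_vmid_pasid_mapping(u32 pasid)
+{
+	return ((pasid << ATC_VMID_PASID_MAPPING_PASID_SHIFT) &
+		ATC_VMID_PASID_MAPPING_PASID_MASK) |
+	       ATC_VMID_PASID_MAPPING_VALID_MASK;
+}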
+
+#define ATC_VM_APERTURE0_CNTL					0x3310u
+#define	ATS_ACCESS_MODE_NEVER						0
+#define	ATS_ACCESS_MODE_ALWAYS						1
+
+#define ATC_VM_APERTURE0_CNTL2					0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR				0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR				0x3300u
+#define ATC_VM_APERTURE1_CNTL					0x3314u
+#define ATC_VM_APERTURE1_CNTL2					0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR				0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR				0x3304u
+
+#define IH_VMID_0_LUT						0x3D40u
+
+#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
new file mode 100644
index 0000000..e48a140
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -0,0 +1,1081 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+    0x00000000, // SX_MISC
+    0x00000000, // SX_SURFACE_SYNC
+    0x00000000, // SX_SCATTER_EXPORT_BASE
+    0x00000000, // SX_SCATTER_EXPORT_SIZE
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_VTX_SEMANTIC_0
+    0x00000000, // SQ_VTX_SEMANTIC_1
+    0x00000000, // SQ_VTX_SEMANTIC_2
+    0x00000000, // SQ_VTX_SEMANTIC_3
+    0x00000000, // SQ_VTX_SEMANTIC_4
+    0x00000000, // SQ_VTX_SEMANTIC_5
+    0x00000000, // SQ_VTX_SEMANTIC_6
+    0x00000000, // SQ_VTX_SEMANTIC_7
+    0x00000000, // SQ_VTX_SEMANTIC_8
+    0x00000000, // SQ_VTX_SEMANTIC_9
+    0x00000000, // SQ_VTX_SEMANTIC_10
+    0x00000000, // SQ_VTX_SEMANTIC_11
+    0x00000000, // SQ_VTX_SEMANTIC_12
+    0x00000000, // SQ_VTX_SEMANTIC_13
+    0x00000000, // SQ_VTX_SEMANTIC_14
+    0x00000000, // SQ_VTX_SEMANTIC_15
+    0x00000000, // SQ_VTX_SEMANTIC_16
+    0x00000000, // SQ_VTX_SEMANTIC_17
+    0x00000000, // SQ_VTX_SEMANTIC_18
+    0x00000000, // SQ_VTX_SEMANTIC_19
+    0x00000000, // SQ_VTX_SEMANTIC_20
+    0x00000000, // SQ_VTX_SEMANTIC_21
+    0x00000000, // SQ_VTX_SEMANTIC_22
+    0x00000000, // SQ_VTX_SEMANTIC_23
+    0x00000000, // SQ_VTX_SEMANTIC_24
+    0x00000000, // SQ_VTX_SEMANTIC_25
+    0x00000000, // SQ_VTX_SEMANTIC_26
+    0x00000000, // SQ_VTX_SEMANTIC_27
+    0x00000000, // SQ_VTX_SEMANTIC_28
+    0x00000000, // SQ_VTX_SEMANTIC_29
+    0x00000000, // SQ_VTX_SEMANTIC_30
+    0x00000000, // SQ_VTX_SEMANTIC_31
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0x00000000, // SX_ALPHA_TEST_CONTROL
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0x00000000, // SX_ALPHA_REF
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0x00000000, // SPI_VS_OUT_ID_0
+    0x00000000, // SPI_VS_OUT_ID_1
+    0x00000000, // SPI_VS_OUT_ID_2
+    0x00000000, // SPI_VS_OUT_ID_3
+    0x00000000, // SPI_VS_OUT_ID_4
+    0x00000000, // SPI_VS_OUT_ID_5
+    0x00000000, // SPI_VS_OUT_ID_6
+    0x00000000, // SPI_VS_OUT_ID_7
+    0x00000000, // SPI_VS_OUT_ID_8
+    0x00000000, // SPI_VS_OUT_ID_9
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0x00000001, // SPI_THREAD_GROUPING
+    0x00000002, // SPI_PS_IN_CONTROL_0
+    0x00000000, // SPI_PS_IN_CONTROL_1
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000000, // SPI_INPUT_Z
+    0x00000000, // SPI_FOG_CNTL
+    0x00000000, // SPI_BARYC_CNTL
+    0x00000000, // SPI_PS_IN_CONTROL_2
+    0x00000000, // SPI_COMPUTE_INPUT_CNTL
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_X
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
+    0x00000000, // SPI_GPR_MGMT
+    0x00000000, // SPI_LDS_MGMT
+    0x00000000, // SPI_STACK_MGMT
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // GDS_ADDR_BASE
+    0x00003fff, // GDS_ADDR_SIZE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // GDS_ORDERED_COUNT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // GDS_APPEND_CONSUME_UAV0
+    0x00000000, // GDS_APPEND_CONSUME_UAV1
+    0x00000000, // GDS_APPEND_CONSUME_UAV2
+    0x00000000, // GDS_APPEND_CONSUME_UAV3
+    0x00000000, // GDS_APPEND_CONSUME_UAV4
+    0x00000000, // GDS_APPEND_CONSUME_UAV5
+    0x00000000, // GDS_APPEND_CONSUME_UAV6
+    0x00000000, // GDS_APPEND_CONSUME_UAV7
+    0x00000000, // GDS_APPEND_CONSUME_UAV8
+    0x00000000, // GDS_APPEND_CONSUME_UAV9
+    0x00000000, // GDS_APPEND_CONSUME_UAV10
+    0x00000000, // GDS_APPEND_CONSUME_UAV11
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000200, // DB_SHADER_CONTROL
+    0x00000000, // PA_CL_CLIP_CNTL
+    0x00000000, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0x00000000, // SQ_LSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_HSTMP_RING_ITEMSIZE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_PS
+    0x00000000, // SQ_PGM_RESOURCES_PS
+    0x00000000, // SQ_PGM_RESOURCES_2_PS
+    0x00000000, // SQ_PGM_EXPORTS_PS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_VS
+    0x00000000, // SQ_PGM_RESOURCES_VS
+    0x00000000, // SQ_PGM_RESOURCES_2_VS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_GS
+    0x00000000, // SQ_PGM_RESOURCES_GS
+    0x00000000, // SQ_PGM_RESOURCES_2_GS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_ES
+    0x00000000, // SQ_PGM_RESOURCES_ES
+    0x00000000, // SQ_PGM_RESOURCES_2_ES
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_FS
+    0x00000000, // SQ_PGM_RESOURCES_FS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_HS
+    0x00000000, // SQ_PGM_RESOURCES_HS
+    0x00000000, // SQ_PGM_RESOURCES_2_HS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_LS
+    0x00000000, // SQ_PGM_RESOURCES_LS
+    0x00000000, // SQ_PGM_RESOURCES_2_LS
+};
+static const u32 SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // SQ_LDS_ALLOC
+    0x00000000, // SQ_LDS_ALLOC_PS
+    0x00000000, // SQ_VTX_SEMANTIC_CLEAR
+    0, // HOLE
+    0x00000000, // SQ_THREAD_TRACE_CTRL
+    0, // HOLE
+    0x00000000, // SQ_ESGS_RING_ITEMSIZE
+    0x00000000, // SQ_GSVS_RING_ITEMSIZE
+    0x00000000, // SQ_ESTMP_RING_ITEMSIZE
+    0x00000000, // SQ_GSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_VSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_PSTMP_RING_ITEMSIZE
+    0, // HOLE
+    0x00000000, // SQ_GS_VERT_ITEMSIZE
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_1
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_2
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_3
+    0x00000000, // SQ_GSVS_RING_OFFSET_1
+    0x00000000, // SQ_GSVS_RING_OFFSET_2
+    0x00000000, // SQ_GSVS_RING_OFFSET_3
+    0x00000000, // SQ_GWS_RING_OFFSET
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_15
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_DMA_MAX_SIZE
+    0x00000000, // VGT_DMA_INDEX_TYPE
+    0, // HOLE
+    0x00000000, // VGT_PRIMITIVEID_EN
+    0x00000000, // VGT_DMA_NUM_INSTANCES
+};
+static const u32 SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+};
+static const u32 SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0x00000000, // CB_IMMED0_BASE
+    0x00000000, // CB_IMMED1_BASE
+    0x00000000, // CB_IMMED2_BASE
+    0x00000000, // CB_IMMED3_BASE
+    0x00000000, // CB_IMMED4_BASE
+    0x00000000, // CB_IMMED5_BASE
+    0x00000000, // CB_IMMED6_BASE
+    0x00000000, // CB_IMMED7_BASE
+    0x00000000, // CB_IMMED8_BASE
+    0x00000000, // CB_IMMED9_BASE
+    0x00000000, // CB_IMMED10_BASE
+    0x00000000, // CB_IMMED11_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0x00000000, // CB_CLRCMP_CONTROL
+    0x00000000, // CB_CLRCMP_SRC
+    0x00000000, // CB_CLRCMP_DST
+    0x00000000, // CB_CLRCMP_MSK
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0x00000000, // CB_COLOR0_DIM
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0x00000000, // CB_COLOR0_CLEAR_WORD2
+    0x00000000, // CB_COLOR0_CLEAR_WORD3
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0x00000000, // CB_COLOR1_DIM
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0x00000000, // CB_COLOR1_CLEAR_WORD2
+    0x00000000, // CB_COLOR1_CLEAR_WORD3
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0x00000000, // CB_COLOR2_DIM
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0x00000000, // CB_COLOR2_CLEAR_WORD2
+    0x00000000, // CB_COLOR2_CLEAR_WORD3
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0x00000000, // CB_COLOR3_DIM
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0x00000000, // CB_COLOR3_CLEAR_WORD2
+    0x00000000, // CB_COLOR3_CLEAR_WORD3
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0x00000000, // CB_COLOR4_DIM
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0x00000000, // CB_COLOR4_CLEAR_WORD2
+    0x00000000, // CB_COLOR4_CLEAR_WORD3
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0x00000000, // CB_COLOR5_DIM
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0x00000000, // CB_COLOR5_CLEAR_WORD2
+    0x00000000, // CB_COLOR5_CLEAR_WORD3
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0x00000000, // CB_COLOR6_DIM
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0x00000000, // CB_COLOR6_CLEAR_WORD2
+    0x00000000, // CB_COLOR6_CLEAR_WORD3
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0x00000000, // CB_COLOR7_DIM
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+    0x00000000, // CB_COLOR7_CLEAR_WORD2
+    0x00000000, // CB_COLOR7_CLEAR_WORD3
+    0x00000000, // CB_COLOR8_BASE
+    0x00000000, // CB_COLOR8_PITCH
+    0x00000000, // CB_COLOR8_SLICE
+    0x00000000, // CB_COLOR8_VIEW
+    0x00000000, // CB_COLOR8_INFO
+    0x00000000, // CB_COLOR8_ATTRIB
+    0x00000000, // CB_COLOR8_DIM
+    0x00000000, // CB_COLOR9_BASE
+    0x00000000, // CB_COLOR9_PITCH
+    0x00000000, // CB_COLOR9_SLICE
+    0x00000000, // CB_COLOR9_VIEW
+    0x00000000, // CB_COLOR9_INFO
+    0x00000000, // CB_COLOR9_ATTRIB
+    0x00000000, // CB_COLOR9_DIM
+    0x00000000, // CB_COLOR10_BASE
+    0x00000000, // CB_COLOR10_PITCH
+    0x00000000, // CB_COLOR10_SLICE
+    0x00000000, // CB_COLOR10_VIEW
+    0x00000000, // CB_COLOR10_INFO
+    0x00000000, // CB_COLOR10_ATTRIB
+    0x00000000, // CB_COLOR10_DIM
+    0x00000000, // CB_COLOR11_BASE
+    0x00000000, // CB_COLOR11_PITCH
+    0x00000000, // CB_COLOR11_SLICE
+    0x00000000, // CB_COLOR11_VIEW
+    0x00000000, // CB_COLOR11_INFO
+    0x00000000, // CB_COLOR11_ATTRIB
+    0x00000000, // CB_COLOR11_DIM
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
+};
+static const struct cs_extent_def SECT_CONTEXT_defs[] =
+{
+    {SECT_CONTEXT_def_1, 0x0000a000, 488 },
+    {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
+    {SECT_CONTEXT_def_3, 0x0000a200, 55 },
+    {SECT_CONTEXT_def_4, 0x0000a23a, 99 },
+    {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
+    {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
+    {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
+    { NULL, 0, 0 }
+};
+static const u32 SECT_CLEAR_def_1[] =
+{
+    0xffffffff, // SQ_TEX_SAMPLER_CLEAR
+    0xffffffff, // SQ_TEX_RESOURCE_CLEAR
+    0xffffffff, // SQ_LOOP_BOOL_CLEAR
+};
+static const struct cs_extent_def SECT_CLEAR_defs[] =
+{
+    {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
+    { NULL, 0, 0 }
+};
+static const u32 SECT_CTRLCONST_def_1[] =
+{
+    0x00000000, // SQ_VTX_BASE_VTX_LOC
+    0x00000000, // SQ_VTX_START_INST_LOC
+};
+static const struct cs_extent_def SECT_CTRLCONST_defs[] =
+{
+    {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def cayman_cs_data[] = {
+    { SECT_CONTEXT_defs, SECT_CONTEXT },
+    { SECT_CLEAR_defs, SECT_CLEAR },
+    { SECT_CTRLCONST_defs, SECT_CTRLCONST },
+    { NULL, SECT_NONE }
+};
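For orientation, a minimal sketch of how a consumer might walk tables like cayman_cs_data above. Each cs_extent_def names a run of consecutive context registers (a dword offset plus a count, including the HOLE placeholders), and a NULL entry terminates each list. The struct and enum layout below is an assumption for illustration only; the kernel's real definitions live in clearstate_defs.h.

/* Hypothetical walker over the clear-state tables above.
 * The cs_extent_def/cs_section_def layout here is assumed for
 * illustration; see clearstate_defs.h for the actual definitions. */
struct cs_extent_def {
	const unsigned int *extent;  /* default values, one per register */
	unsigned int reg_index;      /* dword offset of the first register */
	unsigned int reg_count;      /* number of consecutive registers */
};

enum section_id { SECT_NONE, SECT_CONTEXT, SECT_CLEAR, SECT_CTRLCONST };

struct cs_section_def {
	const struct cs_extent_def *section;  /* NULL-terminated extent list */
	enum section_id section_id;
};

/* Count every default dword in a table such as cayman_cs_data;
 * a driver would use a count like this to size a clear-state buffer. */
static unsigned int cs_data_dword_count(const struct cs_section_def *cs)
{
	unsigned int count = 0;

	for (; cs->section; cs++) {
		const struct cs_extent_def *ext = cs->section;

		for (; ext->extent; ext++)
			count += ext->reg_count;  /* HOLE slots count too */
	}
	return count;
}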
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 0000000..f55d066
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0x00000000, // TA_BC_BASE_ADDR_HI
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_HI_0
+    0x00000000, // COHER_DEST_BASE_HI_1
+    0x00000000, // COHER_DEST_BASE_HI_2
+    0x00000000, // COHER_DEST_BASE_HI_3
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+    0, // HOLE
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0x00000000, // VGT_GS_ONCHIP_CNTL
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // WD_ENHANCE
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+    {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+    {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+    {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+    { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
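+
+/*
+ * A minimal sketch of how a table like ci_cs_data is typically walked
+ * when sizing the clear-state buffer: each SECT_CONTEXT extent costs one
+ * SET_CONTEXT_REG packet (2 header dwords) plus reg_count payload dwords.
+ * The helper name is illustrative only; the real consumers live in the
+ * per-ASIC setup code and may account for additional packets.
+ */
+static inline u32 ci_example_csb_dw_count(const struct cs_section_def *cs_data)
+{
+    const struct cs_section_def *sect;
+    const struct cs_extent_def *ext;
+    u32 count = 0;
+
+    for (sect = cs_data; sect->id != SECT_NONE; sect++)
+        for (ext = sect->section; ext->extent != NULL; ext++)
+            if (sect->id == SECT_CONTEXT)
+                count += 2 + ext->reg_count;
+    return count;
+}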
diff --git a/drivers/gpu/drm/radeon/clearstate_defs.h b/drivers/gpu/drm/radeon/clearstate_defs.h
new file mode 100644
index 0000000..3eda707
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_defs.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef CLEARSTATE_DEFS_H
+#define CLEARSTATE_DEFS_H
+
+enum section_id {
+    SECT_NONE,
+    SECT_CONTEXT,
+    SECT_CLEAR,
+    SECT_CTRLCONST
+};
+
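+/*
+ * An extent describes one run of consecutive registers: @extent holds a
+ * default value for every register in the run, @reg_index is the dword
+ * offset of the first register, and @reg_count is the length of the run.
+ * Entries commented "HOLE" in the value tables cover reserved offsets
+ * that are still written (as zero) so each run stays contiguous.  A
+ * cs_section_def groups the extents belonging to one section_id; both
+ * extent and section lists end with a NULL sentinel entry.
+ */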
+struct cs_extent_def {
+    const unsigned int *extent;
+    const unsigned int reg_index;
+    const unsigned int reg_count;
+};
+
+struct cs_section_def {
+    const struct cs_extent_def *section;
+    const enum section_id id;
+};
+
+#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
new file mode 100644
index 0000000..63a1ffb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -0,0 +1,1080 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+    0x00000000, // SX_MISC
+    0x00000000, // SX_SURFACE_SYNC
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_VTX_SEMANTIC_0
+    0x00000000, // SQ_VTX_SEMANTIC_1
+    0x00000000, // SQ_VTX_SEMANTIC_2
+    0x00000000, // SQ_VTX_SEMANTIC_3
+    0x00000000, // SQ_VTX_SEMANTIC_4
+    0x00000000, // SQ_VTX_SEMANTIC_5
+    0x00000000, // SQ_VTX_SEMANTIC_6
+    0x00000000, // SQ_VTX_SEMANTIC_7
+    0x00000000, // SQ_VTX_SEMANTIC_8
+    0x00000000, // SQ_VTX_SEMANTIC_9
+    0x00000000, // SQ_VTX_SEMANTIC_10
+    0x00000000, // SQ_VTX_SEMANTIC_11
+    0x00000000, // SQ_VTX_SEMANTIC_12
+    0x00000000, // SQ_VTX_SEMANTIC_13
+    0x00000000, // SQ_VTX_SEMANTIC_14
+    0x00000000, // SQ_VTX_SEMANTIC_15
+    0x00000000, // SQ_VTX_SEMANTIC_16
+    0x00000000, // SQ_VTX_SEMANTIC_17
+    0x00000000, // SQ_VTX_SEMANTIC_18
+    0x00000000, // SQ_VTX_SEMANTIC_19
+    0x00000000, // SQ_VTX_SEMANTIC_20
+    0x00000000, // SQ_VTX_SEMANTIC_21
+    0x00000000, // SQ_VTX_SEMANTIC_22
+    0x00000000, // SQ_VTX_SEMANTIC_23
+    0x00000000, // SQ_VTX_SEMANTIC_24
+    0x00000000, // SQ_VTX_SEMANTIC_25
+    0x00000000, // SQ_VTX_SEMANTIC_26
+    0x00000000, // SQ_VTX_SEMANTIC_27
+    0x00000000, // SQ_VTX_SEMANTIC_28
+    0x00000000, // SQ_VTX_SEMANTIC_29
+    0x00000000, // SQ_VTX_SEMANTIC_30
+    0x00000000, // SQ_VTX_SEMANTIC_31
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0x00000000, // SX_ALPHA_TEST_CONTROL
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0x00000000, // SX_ALPHA_REF
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0x00000000, // SPI_VS_OUT_ID_0
+    0x00000000, // SPI_VS_OUT_ID_1
+    0x00000000, // SPI_VS_OUT_ID_2
+    0x00000000, // SPI_VS_OUT_ID_3
+    0x00000000, // SPI_VS_OUT_ID_4
+    0x00000000, // SPI_VS_OUT_ID_5
+    0x00000000, // SPI_VS_OUT_ID_6
+    0x00000000, // SPI_VS_OUT_ID_7
+    0x00000000, // SPI_VS_OUT_ID_8
+    0x00000000, // SPI_VS_OUT_ID_9
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0x00000001, // SPI_THREAD_GROUPING
+    0x00000000, // SPI_PS_IN_CONTROL_0
+    0x00000000, // SPI_PS_IN_CONTROL_1
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000000, // SPI_INPUT_Z
+    0x00000000, // SPI_FOG_CNTL
+    0x00000000, // SPI_BARYC_CNTL
+    0x00000000, // SPI_PS_IN_CONTROL_2
+    0x00000000, // SPI_COMPUTE_INPUT_CNTL
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_X
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
+    0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // GDS_ADDR_BASE
+    0x00003fff, // GDS_ADDR_SIZE
+    0x00000001, // GDS_ORDERED_WAVE_PER_SE
+    0x00000000, // GDS_APPEND_CONSUME_UAV0
+    0x00000000, // GDS_APPEND_CONSUME_UAV1
+    0x00000000, // GDS_APPEND_CONSUME_UAV2
+    0x00000000, // GDS_APPEND_CONSUME_UAV3
+    0x00000000, // GDS_APPEND_CONSUME_UAV4
+    0x00000000, // GDS_APPEND_CONSUME_UAV5
+    0x00000000, // GDS_APPEND_CONSUME_UAV6
+    0x00000000, // GDS_APPEND_CONSUME_UAV7
+    0x00000000, // GDS_APPEND_CONSUME_UAV8
+    0x00000000, // GDS_APPEND_CONSUME_UAV9
+    0x00000000, // GDS_APPEND_CONSUME_UAV10
+    0x00000000, // GDS_APPEND_CONSUME_UAV11
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0, // HOLE
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000200, // DB_SHADER_CONTROL
+    0x00000000, // PA_CL_CLIP_CNTL
+    0x00000000, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0x00000000, // SQ_LSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_HSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_DYN_GPR_RESOURCE_LIMIT_1
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_PS
+    0x00000000, // SQ_PGM_RESOURCES_PS
+    0x00000000, // SQ_PGM_RESOURCES_2_PS
+    0x00000000, // SQ_PGM_EXPORTS_PS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_VS
+    0x00000000, // SQ_PGM_RESOURCES_VS
+    0x00000000, // SQ_PGM_RESOURCES_2_VS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_GS
+    0x00000000, // SQ_PGM_RESOURCES_GS
+    0x00000000, // SQ_PGM_RESOURCES_2_GS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_ES
+    0x00000000, // SQ_PGM_RESOURCES_ES
+    0x00000000, // SQ_PGM_RESOURCES_2_ES
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_FS
+    0x00000000, // SQ_PGM_RESOURCES_FS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_HS
+    0x00000000, // SQ_PGM_RESOURCES_HS
+    0x00000000, // SQ_PGM_RESOURCES_2_HS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_PGM_START_LS
+    0x00000000, // SQ_PGM_RESOURCES_LS
+    0x00000000, // SQ_PGM_RESOURCES_2_LS
+};
+static const u32 SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // SQ_LDS_ALLOC
+    0x00000000, // SQ_LDS_ALLOC_PS
+    0x00000000, // SQ_VTX_SEMANTIC_CLEAR
+    0, // HOLE
+    0x00000000, // SQ_THREAD_TRACE_CTRL
+    0, // HOLE
+    0x00000000, // SQ_ESGS_RING_ITEMSIZE
+    0x00000000, // SQ_GSVS_RING_ITEMSIZE
+    0x00000000, // SQ_ESTMP_RING_ITEMSIZE
+    0x00000000, // SQ_GSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_VSTMP_RING_ITEMSIZE
+    0x00000000, // SQ_PSTMP_RING_ITEMSIZE
+    0, // HOLE
+    0x00000000, // SQ_GS_VERT_ITEMSIZE
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_1
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_2
+    0x00000000, // SQ_GS_VERT_ITEMSIZE_3
+    0x00000000, // SQ_GSVS_RING_OFFSET_1
+    0x00000000, // SQ_GSVS_RING_OFFSET_2
+    0x00000000, // SQ_GSVS_RING_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_PS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_VS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_GS_15
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000000, // VGT_GS_PER_ES
+    0x00000000, // VGT_ES_PER_GS
+    0x00000000, // VGT_GS_PER_VS
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+};
+static const u32 SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_DMA_MAX_SIZE
+    0x00000000, // VGT_DMA_INDEX_TYPE
+    0, // HOLE
+    0x00000000, // VGT_PRIMITIVEID_EN
+    0x00000000, // VGT_DMA_NUM_INSTANCES
+};
+static const u32 SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
+    0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_LS_SIZE
+    0x00000000, // VGT_HS_SIZE
+    0x00000000, // VGT_LS_HS_ALLOC
+    0x00000000, // VGT_HS_PATCH_CONST
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+};
+static const u32 SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0x00000000, // CB_IMMED0_BASE
+    0x00000000, // CB_IMMED1_BASE
+    0x00000000, // CB_IMMED2_BASE
+    0x00000000, // CB_IMMED3_BASE
+    0x00000000, // CB_IMMED4_BASE
+    0x00000000, // CB_IMMED5_BASE
+    0x00000000, // CB_IMMED6_BASE
+    0x00000000, // CB_IMMED7_BASE
+    0x00000000, // CB_IMMED8_BASE
+    0x00000000, // CB_IMMED9_BASE
+    0x00000000, // CB_IMMED10_BASE
+    0x00000000, // CB_IMMED11_BASE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_4
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_5
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_6
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_7
+    0xffffffff, // PA_SC_AA_MASK
+    0x00000000, // CB_CLRCMP_CONTROL
+    0x00000000, // CB_CLRCMP_SRC
+    0x00000000, // CB_CLRCMP_DST
+    0x00000000, // CB_CLRCMP_MSK
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0x00000000, // CB_COLOR0_DIM
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0x00000000, // CB_COLOR0_CLEAR_WORD2
+    0x00000000, // CB_COLOR0_CLEAR_WORD3
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0x00000000, // CB_COLOR1_DIM
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0x00000000, // CB_COLOR1_CLEAR_WORD2
+    0x00000000, // CB_COLOR1_CLEAR_WORD3
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0x00000000, // CB_COLOR2_DIM
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0x00000000, // CB_COLOR2_CLEAR_WORD2
+    0x00000000, // CB_COLOR2_CLEAR_WORD3
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0x00000000, // CB_COLOR3_DIM
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0x00000000, // CB_COLOR3_CLEAR_WORD2
+    0x00000000, // CB_COLOR3_CLEAR_WORD3
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0x00000000, // CB_COLOR4_DIM
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0x00000000, // CB_COLOR4_CLEAR_WORD2
+    0x00000000, // CB_COLOR4_CLEAR_WORD3
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0x00000000, // CB_COLOR5_DIM
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0x00000000, // CB_COLOR5_CLEAR_WORD2
+    0x00000000, // CB_COLOR5_CLEAR_WORD3
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0x00000000, // CB_COLOR6_DIM
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0x00000000, // CB_COLOR6_CLEAR_WORD2
+    0x00000000, // CB_COLOR6_CLEAR_WORD3
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0x00000000, // CB_COLOR7_DIM
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+    0x00000000, // CB_COLOR7_CLEAR_WORD2
+    0x00000000, // CB_COLOR7_CLEAR_WORD3
+    0x00000000, // CB_COLOR8_BASE
+    0x00000000, // CB_COLOR8_PITCH
+    0x00000000, // CB_COLOR8_SLICE
+    0x00000000, // CB_COLOR8_VIEW
+    0x00000000, // CB_COLOR8_INFO
+    0x00000000, // CB_COLOR8_ATTRIB
+    0x00000000, // CB_COLOR8_DIM
+    0x00000000, // CB_COLOR9_BASE
+    0x00000000, // CB_COLOR9_PITCH
+    0x00000000, // CB_COLOR9_SLICE
+    0x00000000, // CB_COLOR9_VIEW
+    0x00000000, // CB_COLOR9_INFO
+    0x00000000, // CB_COLOR9_ATTRIB
+    0x00000000, // CB_COLOR9_DIM
+    0x00000000, // CB_COLOR10_BASE
+    0x00000000, // CB_COLOR10_PITCH
+    0x00000000, // CB_COLOR10_SLICE
+    0x00000000, // CB_COLOR10_VIEW
+    0x00000000, // CB_COLOR10_INFO
+    0x00000000, // CB_COLOR10_ATTRIB
+    0x00000000, // CB_COLOR10_DIM
+    0x00000000, // CB_COLOR11_BASE
+    0x00000000, // CB_COLOR11_PITCH
+    0x00000000, // CB_COLOR11_SLICE
+    0x00000000, // CB_COLOR11_VIEW
+    0x00000000, // CB_COLOR11_INFO
+    0x00000000, // CB_COLOR11_ATTRIB
+    0x00000000, // CB_COLOR11_DIM
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_HS_15
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_0
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_1
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_2
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_3
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_4
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_5
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_6
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_7
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_8
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_9
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_10
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_11
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_12
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_13
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_14
+    0x00000000, // SQ_ALU_CONST_CACHE_LS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
+    0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
+};
+static const struct cs_extent_def SECT_CONTEXT_defs[] =
+{
+    {SECT_CONTEXT_def_1, 0x0000a000, 488 },
+    {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
+    {SECT_CONTEXT_def_3, 0x0000a200, 55 },
+    {SECT_CONTEXT_def_4, 0x0000a23a, 98 },
+    {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
+    {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
+    {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
+    { NULL, 0, 0 }
+};
+static const u32 SECT_CLEAR_def_1[] =
+{
+    0xffffffff, // SQ_TEX_SAMPLER_CLEAR
+    0xffffffff, // SQ_TEX_RESOURCE_CLEAR
+    0xffffffff, // SQ_LOOP_BOOL_CLEAR
+};
+static const struct cs_extent_def SECT_CLEAR_defs[] =
+{
+    {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
+    { NULL, 0, 0 }
+};
+static const u32 SECT_CTRLCONST_def_1[] =
+{
+    0x00000000, // SQ_VTX_BASE_VTX_LOC
+    0x00000000, // SQ_VTX_START_INST_LOC
+};
+static const struct cs_extent_def SECT_CTRLCONST_defs[] =
+{
+    {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def evergreen_cs_data[] = {
+    { SECT_CONTEXT_defs, SECT_CONTEXT },
+    { SECT_CLEAR_defs, SECT_CLEAR },
+    { SECT_CTRLCONST_defs, SECT_CTRLCONST },
+    { NULL, SECT_NONE }
+};
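+
+/*
+ * The reg_index values are dword offsets: the SECT_CONTEXT extents start
+ * at 0x0000a000, i.e. byte address 0x28000, the base of the context
+ * register range, while SECT_CLEAR resets the SQ sampler/resource/
+ * loop-bool state and SECT_CTRLCONST the vertex base/start-instance
+ * control constants listed above.
+ */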
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
new file mode 100644
index 0000000..66e39cd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
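+/*
+ * Each extent maps a table of defaults to the offset of its first
+ * register and a dword count; the zero HOLE entries pad registers that
+ * are skipped inside a block.
+ */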
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
new file mode 100644
index 0000000..3eb7899
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -0,0 +1,2165 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "r600_dpm.h"
+#include "cypress_dpm.h"
+#include "atom.h"
+
+#define SMC_RAM_END 0x8000
+
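+/*
+ * MC arbiter frequency slots: F0 carries the boot AC timing, F1-F3 are
+ * assigned to the low/medium/high performance levels.
+ */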
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+
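+/*
+ * Arm or disarm dynamic PCIe gen2 switching in the BIF.  The speed
+ * change is only set up when the link partner advertises gen2 support
+ * and the board did not already boot in gen2 mode.
+ */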
+static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
+						 bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp, bif;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (enable) {
+		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+			if (!pi->boot_in_gen2) {
+				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
+				bif |= CG_CLIENT_REQ(0xd);
+				WREG32(CG_BIF_REQ_AND_RSP, bif);
+
+				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
+				tmp |= LC_GEN2_EN_STRAP;
+
+				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+				udelay(10);
+				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+			}
+		}
+	} else {
+		if (!pi->boot_in_gen2) {
+			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+			tmp &= ~LC_GEN2_EN_STRAP;
+		}
+		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+	}
+}
+
+static void cypress_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
+					     bool enable)
+{
+	cypress_enable_bif_dynamic_pcie_gen2(rdev, enable);
+
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+}
+
+#if 0
+static int cypress_enter_ulp_state(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (pi->gfx_clock_gating) {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+
+		RREG32(GB_ADDR_CONFIG);
+	}
+
+	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
+		 ~HOST_SMC_MSG_MASK);
+
+	udelay(7000);
+
+	return 0;
+}
+#endif
+
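+/*
+ * Toggle dynamic gfx clock gating.  GRBM_GFX_INDEX is set to broadcast
+ * (0xC0000000) so the CGLS tile writes hit every shader engine; when
+ * disabling, the gfx clock is briefly forced on around the teardown.
+ */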
+static void cypress_gfx_clock_gating_enable(struct radeon_device *rdev,
+					    bool enable)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (enable) {
+		if (eg_pi->light_sleep) {
+			WREG32(GRBM_GFX_INDEX, 0xC0000000);
+
+			WREG32_CG(CG_CGLS_TILE_0, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_1, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_2, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_3, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_4, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_5, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_6, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_7, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_8, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_9, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_10, 0xFFFFFFFF);
+			WREG32_CG(CG_CGLS_TILE_11, 0xFFFFFFFF);
+
+			WREG32_P(SCLK_PWRMGT_CNTL, DYN_LIGHT_SLEEP_EN, ~DYN_LIGHT_SLEEP_EN);
+		}
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+	} else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_ADDR_CONFIG);
+
+		if (eg_pi->light_sleep) {
+			WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_LIGHT_SLEEP_EN);
+
+			WREG32(GRBM_GFX_INDEX, 0xC0000000);
+
+			WREG32_CG(CG_CGLS_TILE_0, 0);
+			WREG32_CG(CG_CGLS_TILE_1, 0);
+			WREG32_CG(CG_CGLS_TILE_2, 0);
+			WREG32_CG(CG_CGLS_TILE_3, 0);
+			WREG32_CG(CG_CGLS_TILE_4, 0);
+			WREG32_CG(CG_CGLS_TILE_5, 0);
+			WREG32_CG(CG_CGLS_TILE_6, 0);
+			WREG32_CG(CG_CGLS_TILE_7, 0);
+			WREG32_CG(CG_CGLS_TILE_8, 0);
+			WREG32_CG(CG_CGLS_TILE_9, 0);
+			WREG32_CG(CG_CGLS_TILE_10, 0);
+			WREG32_CG(CG_CGLS_TILE_11, 0);
+		}
+	}
+}
+
+static void cypress_mg_clock_gating_enable(struct radeon_device *rdev,
+					   bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (enable) {
+		u32 cgts_sm_ctrl_reg;
+
+		if (rdev->family == CHIP_CEDAR)
+			cgts_sm_ctrl_reg = CEDAR_MGCGCGTSSMCTRL_DFLT;
+		else if (rdev->family == CHIP_REDWOOD)
+			cgts_sm_ctrl_reg = REDWOOD_MGCGCGTSSMCTRL_DFLT;
+		else
+			cgts_sm_ctrl_reg = CYPRESS_MGCGCGTSSMCTRL_DFLT;
+
+		WREG32(GRBM_GFX_INDEX, 0xC0000000);
+
+		WREG32_CG(CG_CGTT_LOCAL_0, CYPRESS_MGCGTTLOCAL0_DFLT);
+		WREG32_CG(CG_CGTT_LOCAL_1, CYPRESS_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF);
+		WREG32_CG(CG_CGTT_LOCAL_2, CYPRESS_MGCGTTLOCAL2_DFLT);
+		WREG32_CG(CG_CGTT_LOCAL_3, CYPRESS_MGCGTTLOCAL3_DFLT);
+
+		if (pi->mgcgtssm)
+			WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+		if (eg_pi->mcls) {
+			WREG32_P(MC_CITF_MISC_RD_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_CITF_MISC_WR_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_CITF_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_HUB_MISC_HUB_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_HUB_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_HUB_MISC_SIP_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(MC_XPB_CLK_GAT, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+			WREG32_P(VM_L2_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
+		}
+	} else {
+		WREG32(GRBM_GFX_INDEX, 0xC0000000);
+
+		WREG32_CG(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
+		WREG32_CG(CG_CGTT_LOCAL_1, 0xFFFFFFFF);
+		WREG32_CG(CG_CGTT_LOCAL_2, 0xFFFFFFFF);
+		WREG32_CG(CG_CGTT_LOCAL_3, 0xFFFFFFFF);
+
+		if (pi->mgcgtssm)
+			WREG32(CGTS_SM_CTRL_REG, 0x81f44bc0);
+	}
+}
+
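+/*
+ * Enable or disable spread spectrum: dynamic engine SS is gated through
+ * GENERAL_PWRMGT and memory SS through MPLL_CNTL_MODE, honoring the
+ * per-board sclk/mclk SS capability flags.
+ */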
+void cypress_enable_spread_spectrum(struct radeon_device *rdev,
+				    bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		if (pi->sclk_ss)
+			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+
+		if (pi->mclk_ss)
+			WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
+	} else {
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
+		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_DSMODE_EN);
+	}
+}
+
+void cypress_start_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+void cypress_enable_sclk_control(struct radeon_device *rdev,
+				 bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+void cypress_enable_mclk_control(struct radeon_device *rdev,
+				 bool enable)
+{
+	if (enable)
+		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
+	else
+		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
+}
+
+int cypress_notify_smc_display_change(struct radeon_device *rdev,
+				      bool has_display)
+{
+	PPSMC_Msg msg = has_display ?
+		(PPSMC_Msg)PPSMC_MSG_HasDisplay : (PPSMC_Msg)PPSMC_MSG_NoDisplay;
+
+	if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
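+/*
+ * Seed the SMC soft registers with switch/delay limits; the mclk switch
+ * limit works out to 4.6 times the reference clock.
+ */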
+void cypress_program_response_times(struct radeon_device *rdev)
+{
+	u32 reference_clock;
+	u32 mclk_switch_limit;
+
+	reference_clock = radeon_get_xclk(rdev);
+	mclk_switch_limit = (460 * reference_clock) / 100;
+
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_mclk_switch_lim,
+				      mclk_switch_limit);
+
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+
+	rv770_program_response_times(rdev);
+
+	if (ASIC_IS_LOMBOK(rdev))
+		rv770_write_smc_soft_register(rdev,
+					      RV770_SMC_SOFT_REGISTER_is_asic_lombok, 1);
+}
+
+static int cypress_pcie_performance_request(struct radeon_device *rdev,
+					    u8 perf_req, bool advertise)
+{
+#if defined(CONFIG_ACPI)
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
+	u32 tmp;
+
+	udelay(10);
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) && (tmp & LC_CURRENT_DATA_RATE))
+		return 0;
+
+#if defined(CONFIG_ACPI)
+	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
+	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+		eg_pi->pcie_performance_request_registered = true;
+		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
+	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
+		   eg_pi->pcie_performance_request_registered) {
+		eg_pi->pcie_performance_request_registered = false;
+		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
+	}
+#endif
+
+	return 0;
+}
+
+void cypress_advertise_gen2_capability(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+#if defined(CONFIG_ACPI)
+	radeon_acpi_pcie_notify_device_ready(rdev);
+#endif
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+		pi->pcie_gen2 = true;
+	else
+		pi->pcie_gen2 = false;
+
+	if (!pi->pcie_gen2)
+		cypress_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
+}
+
+static enum radeon_pcie_gen cypress_get_maximum_link_speed(struct radeon_ps *radeon_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+
+	if (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+		return 1;
+	return 0;
+}
+
+void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+							 struct radeon_ps *radeon_new_state,
+							 struct radeon_ps *radeon_current_state)
+{
+	enum radeon_pcie_gen pcie_link_speed_target =
+		cypress_get_maximum_link_speed(radeon_new_state);
+	enum radeon_pcie_gen pcie_link_speed_current =
+		cypress_get_maximum_link_speed(radeon_current_state);
+	u8 request;
+
+	if (pcie_link_speed_target < pcie_link_speed_current) {
+		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
+			request = PCIE_PERF_REQ_PECI_GEN1;
+		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN3;
+
+		cypress_pcie_performance_request(rdev, request, false);
+	}
+}
+
+void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
+							  struct radeon_ps *radeon_new_state,
+							  struct radeon_ps *radeon_current_state)
+{
+	enum radeon_pcie_gen pcie_link_speed_target =
+		cypress_get_maximum_link_speed(radeon_new_state);
+	enum radeon_pcie_gen pcie_link_speed_current =
+		cypress_get_maximum_link_speed(radeon_current_state);
+	u8 request;
+
+	if (pcie_link_speed_target > pcie_link_speed_current) {
+		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
+			request = PCIE_PERF_REQ_PECI_GEN1;
+		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN3;
+
+		cypress_pcie_performance_request(rdev, request, false);
+	}
+}
+
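+/*
+ * Look up the first voltage table entry that satisfies the requested
+ * value and record its index along with the big-endian value the SMC
+ * expects.
+ */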
+static int cypress_populate_voltage_value(struct radeon_device *rdev,
+					  struct atom_voltage_table *table,
+					  u16 value, RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value <= table->entries[i].value) {
+			voltage->index = (u8)i;
+			voltage->value = cpu_to_be16(table->entries[i].value);
+			break;
+		}
+	}
+
+	if (i == table->count)
+		return -EINVAL;
+
+	return 0;
+}
+
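+/*
+ * For GDDR5, memory clocks at or below the strobe threshold run in
+ * strobe mode: return the frequency ratio with SMC_STROBE_ENABLE set.
+ */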
+u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 result = 0;
+	bool strobe_mode = false;
+
+	if (pi->mem_gddr5) {
+		if (mclk <= pi->mclk_strobe_mode_threshold)
+			strobe_mode = true;
+		result = cypress_get_mclk_frequency_ratio(rdev, mclk, strobe_mode);
+
+		if (strobe_mode)
+			result |= SMC_STROBE_ENABLE;
+	}
+
+	return result;
+}
+
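+/*
+ * Map the MPLL feedback divider to an IBIAS trim based on the resulting
+ * VCO frequency, with separate tables for 100 MHz and 27 MHz reference
+ * clocks (clocks are in 10 kHz units, so ref_clk == 10000 is 100 MHz).
+ */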
+u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
+{
+	u32 ref_clk = rdev->clock.mpll.reference_freq;
+	u32 vco = clkf * ref_clk;
+
+	/* 100 MHz ref clk */
+	if (ref_clk == 10000) {
+		if (vco > 500000)
+			return 0xC6;
+		if (vco > 400000)
+			return 0x9D;
+		if (vco > 330000)
+			return 0x6C;
+		if (vco > 250000)
+			return 0x2B;
+		if (vco > 160000)
+			return 0x5B;
+		if (vco > 120000)
+			return 0x0A;
+		return 0x4B;
+	}
+
+	/* 27 MHz ref clk */
+	if (vco > 250000)
+		return 0x8B;
+	if (vco > 200000)
+		return 0xCC;
+	if (vco > 150000)
+		return 0x9B;
+	return 0x6B;
+}
+
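+/*
+ * Fill in an SMC mclk level: program the MPLL AD/DQ dividers and IBIAS
+ * from the ATOM divider computation, fold in memory spread spectrum
+ * when available, and pick the DLL speed/power-down bits for the
+ * target clock.
+ */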
+static int cypress_populate_mclk_value(struct radeon_device *rdev,
+				       u32 engine_clock, u32 memory_clock,
+				       RV7XX_SMC_MCLK_VALUE *mclk,
+				       bool strobe_mode, bool dll_state_on)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	u32 mpll_ad_func_cntl =
+		pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl =
+		pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl =
+		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
+	u32 dll_cntl =
+		pi->clk_regs.rv770.dll_cntl;
+	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
+	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
+	struct atom_clock_dividers dividers;
+	u32 ibias;
+	u32 dll_speed;
+	int ret;
+	u32 mc_seq_misc7;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     memory_clock, strobe_mode, &dividers);
+	if (ret)
+		return ret;
+
+	if (!strobe_mode) {
+		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
+
+		if (mc_seq_misc7 & 0x8000000)
+			dividers.post_div = 1;
+	}
+
+	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
+
+	mpll_ad_func_cntl &= ~(CLKR_MASK |
+			       YCLK_POST_DIV_MASK |
+			       CLKF_MASK |
+			       CLKFRAC_MASK |
+			       IBIAS_MASK);
+	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
+	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
+	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+	mpll_ad_func_cntl |= IBIAS(ibias);
+
+	if (dividers.vco_mode)
+		mpll_ad_func_cntl_2 |= VCO_MODE;
+	else
+		mpll_ad_func_cntl_2 &= ~VCO_MODE;
+
+	if (pi->mem_gddr5) {
+		mpll_dq_func_cntl &= ~(CLKR_MASK |
+				       YCLK_POST_DIV_MASK |
+				       CLKF_MASK |
+				       CLKFRAC_MASK |
+				       IBIAS_MASK);
+		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
+		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
+		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+		mpll_dq_func_cntl |= IBIAS(ibias);
+
+		if (strobe_mode)
+			mpll_dq_func_cntl &= ~PDNB;
+		else
+			mpll_dq_func_cntl |= PDNB;
+
+		if (dividers.vco_mode)
+			mpll_dq_func_cntl_2 |= VCO_MODE;
+		else
+			mpll_dq_func_cntl_2 &= ~VCO_MODE;
+	}
+
+	if (pi->mclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = memory_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+			u32 reference_clock = rdev->clock.mpll.reference_freq;
+			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+			u32 clk_v = ss.percentage *
+				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clk_v);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clk_s);
+		}
+	}
+
+	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
+					memory_clock);
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
+				     MRDCKA1_PDNB |
+				     MRDCKB0_PDNB |
+				     MRDCKB1_PDNB |
+				     MRDCKC0_PDNB |
+				     MRDCKC1_PDNB |
+				     MRDCKD0_PDNB |
+				     MRDCKD1_PDNB);
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
+				      MRDCKA1_PDNB |
+				      MRDCKB0_PDNB |
+				      MRDCKB1_PDNB |
+				      MRDCKC0_PDNB |
+				      MRDCKC1_PDNB |
+				      MRDCKD0_PDNB |
+				      MRDCKD1_PDNB);
+
+	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
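+/*
+ * Quantize a memory clock (10 kHz units) into the 4-bit frequency-ratio
+ * index used by the MC sequencer; the break points differ between
+ * strobe and non-strobe mode and between Barts+ and older parts.
+ */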
+u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
+				    u32 memory_clock, bool strobe_mode)
+{
+	u8 mc_para_index;
+
+	if (rdev->family >= CHIP_BARTS) {
+		if (strobe_mode) {
+			if (memory_clock < 10000)
+				mc_para_index = 0x00;
+			else if (memory_clock > 47500)
+				mc_para_index = 0x0f;
+			else
+				mc_para_index = (u8)((memory_clock - 10000) / 2500);
+		} else {
+			if (memory_clock < 65000)
+				mc_para_index = 0x00;
+			else if (memory_clock > 135000)
+				mc_para_index = 0x0f;
+			else
+				mc_para_index = (u8)((memory_clock - 60000) / 5000);
+		}
+	} else {
+		if (strobe_mode) {
+			if (memory_clock < 10000)
+				mc_para_index = 0x00;
+			else if (memory_clock > 47500)
+				mc_para_index = 0x0f;
+			else
+				mc_para_index = (u8)((memory_clock - 10000) / 2500);
+		} else {
+			if (memory_clock < 40000)
+				mc_para_index = 0x00;
+			else if (memory_clock > 115000)
+				mc_para_index = 0x0f;
+			else
+				mc_para_index = (u8)((memory_clock - 40000) / 5000);
+		}
+	}
+	return mc_para_index;
+}
+
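+/*
+ * Choose the high or low MVDD entry for the given memory clock; boards
+ * without MVDD control always report the high entry.
+ */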
+static int cypress_populate_mvdd_value(struct radeon_device *rdev,
+				       u32 mclk,
+				       RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (!pi->mvdd_control) {
+		voltage->index = eg_pi->mvdd_high_index;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+		return 0;
+	}
+
+	if (mclk <= pi->mvdd_split_frequency) {
+		voltage->index = eg_pi->mvdd_low_index;
+		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
+	} else {
+		voltage->index = eg_pi->mvdd_high_index;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+	}
+
+	return 0;
+}
+
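+/*
+ * Translate one driver performance level into SMC form: PCIe gen2/XSP
+ * and backbias flags, sclk and mclk PLL programming, memory stutter and
+ * EDC flags, and the VDDC/VDDCI/MVDD voltage entries.
+ */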
+int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
+				       struct rv7xx_pl *pl,
+				       RV770_SMC_HW_PERFORMANCE_LEVEL *level,
+				       u8 watermark_level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int ret;
+	bool dll_state_on;
+
+	level->gen2PCIE = pi->pcie_gen2 ?
+		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
+	level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
+	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
+	level->displayWatermark = watermark_level;
+
+	ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
+	if (ret)
+		return ret;
+
+	level->mcFlags = 0;
+	if (pi->mclk_stutter_mode_threshold &&
+	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+	    !eg_pi->uvd_enabled) {
+		level->mcFlags |= SMC_MC_STUTTER_EN;
+		if (eg_pi->sclk_deep_sleep)
+			level->stateFlags |= PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
+		else
+			level->stateFlags &= ~PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
+	}
+
+	if (pi->mem_gddr5) {
+		if (pl->mclk > pi->mclk_edc_enable_threshold)
+			level->mcFlags |= SMC_MC_EDC_RD_FLAG;
+
+		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+			level->mcFlags |= SMC_MC_EDC_WR_FLAG;
+
+		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
+
+		if (level->strobeMode & SMC_STROBE_ENABLE) {
+			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+			else
+				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+		} else
+			dll_state_on = eg_pi->dll_default_on;
+
+		ret = cypress_populate_mclk_value(rdev,
+						  pl->sclk,
+						  pl->mclk,
+						  &level->mclk,
+						  (level->strobeMode & SMC_STROBE_ENABLE) != 0,
+						  dll_state_on);
+	} else {
+		ret = cypress_populate_mclk_value(rdev,
+						  pl->sclk,
+						  pl->mclk,
+						  &level->mclk,
+						  true,
+						  true);
+	}
+	if (ret)
+		return ret;
+
+	ret = cypress_populate_voltage_value(rdev,
+					     &eg_pi->vddc_voltage_table,
+					     pl->vddc,
+					     &level->vddc);
+	if (ret)
+		return ret;
+
+	if (eg_pi->vddci_control) {
+		ret = cypress_populate_voltage_value(rdev,
+						     &eg_pi->vddci_voltage_table,
+						     pl->vddci,
+						     &level->vddci);
+		if (ret)
+			return ret;
+	}
+
+	ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
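+/*
+ * Convert a three-level (low/medium/high) power state into the SMC
+ * software state, assigning MC arbiter slots F1-F3 and the AC timing
+ * indices used when dynamic AC timing is active.
+ */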
+static int cypress_convert_power_state_to_smc(struct radeon_device *rdev,
+					      struct radeon_ps *radeon_state,
+					      RV770_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int ret;
+
+	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	ret = cypress_convert_power_level_to_smc(rdev,
+						 &state->low,
+						 &smc_state->levels[0],
+						 PPSMC_DISPLAY_WATERMARK_LOW);
+	if (ret)
+		return ret;
+
+	ret = cypress_convert_power_level_to_smc(rdev,
+						 &state->medium,
+						 &smc_state->levels[1],
+						 PPSMC_DISPLAY_WATERMARK_LOW);
+	if (ret)
+		return ret;
+
+	ret = cypress_convert_power_level_to_smc(rdev,
+						 &state->high,
+						 &smc_state->levels[2],
+						 PPSMC_DISPLAY_WATERMARK_HIGH);
+	if (ret)
+		return ret;
+
+	smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
+	smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
+	smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
+
+	if (eg_pi->dynamic_ac_timing) {
+		smc_state->levels[0].ACIndex = 2;
+		smc_state->levels[1].ACIndex = 3;
+		smc_state->levels[2].ACIndex = 4;
+	} else {
+		smc_state->levels[0].ACIndex = 0;
+		smc_state->levels[1].ACIndex = 0;
+		smc_state->levels[2].ACIndex = 0;
+	}
+
+	rv770_populate_smc_sp(rdev, radeon_state, smc_state);
+
+	return rv770_populate_smc_t(rdev, radeon_state, smc_state);
+}
+
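+/*
+ * Pack only the MC registers flagged in valid_flag into the dense,
+ * big-endian register set consumed by the SMC.
+ */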
+static void cypress_convert_mc_registers(struct evergreen_mc_reg_entry *entry,
+					 SMC_Evergreen_MCRegisterSet *data,
+					 u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void cypress_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+						      struct rv7xx_pl *pl,
+						      SMC_Evergreen_MCRegisterSet *mc_reg_table_data)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 i = 0;
+
+	for (i = 0; i < eg_pi->mc_reg_table.num_entries; i++) {
+		if (pl->mclk <=
+		    eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == eg_pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[i],
+				     mc_reg_table_data,
+				     eg_pi->mc_reg_table.last,
+				     eg_pi->mc_reg_table.valid_flag);
+}
+
+static void cypress_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+						struct radeon_ps *radeon_state,
+						SMC_Evergreen_MCRegisters *mc_reg_table)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+
+	cypress_convert_mc_reg_table_entry_to_smc(rdev,
+						  &state->low,
+						  &mc_reg_table->data[2]);
+	cypress_convert_mc_reg_table_entry_to_smc(rdev,
+						  &state->medium,
+						  &mc_reg_table->data[3]);
+	cypress_convert_mc_reg_table_entry_to_smc(rdev,
+						  &state->high,
+						  &mc_reg_table->data[4]);
+}
+
+int cypress_upload_sw_state(struct radeon_device *rdev,
+			    struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u16 address = pi->state_table_start +
+		offsetof(RV770_SMC_STATETABLE, driverState);
+	RV770_SMC_SWSTATE state = { 0 };
+	int ret;
+
+	ret = cypress_convert_power_state_to_smc(rdev, radeon_new_state, &state);
+	if (ret)
+		return ret;
+
+	return rv770_copy_bytes_to_smc(rdev, address, (u8 *)&state,
+				    sizeof(RV770_SMC_SWSTATE),
+				    pi->sram_end);
+}
+
+int cypress_upload_mc_reg_table(struct radeon_device *rdev,
+				struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
+	u16 address;
+
+	cypress_convert_mc_reg_table_to_smc(rdev, radeon_new_state, &mc_reg_table);
+
+	address = eg_pi->mc_reg_table_start +
+		(u16)offsetof(SMC_Evergreen_MCRegisters, data[2]);
+
+	return rv770_copy_bytes_to_smc(rdev, address,
+				       (u8 *)&mc_reg_table.data[2],
+				       sizeof(SMC_Evergreen_MCRegisterSet) * 3,
+				       pi->sram_end);
+}
+
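+/*
+ * Derive the MC arbiter burst time from the engine/memory clock ratio:
+ * result = 4 * multiplier * sclk / (mclk / 2); small ratios encode to
+ * 0, mid ratios to result - 4, larger ones to result / 2 clamped at 18
+ * (multiplier is 2 for non-GDDR5 boards).
+ */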
+u32 cypress_calculate_burst_time(struct radeon_device *rdev,
+				 u32 engine_clock, u32 memory_clock)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 multiplier = pi->mem_gddr5 ? 1 : 2;
+	u32 result = (4 * multiplier * engine_clock) / (memory_clock / 2);
+	u32 burst_time;
+
+	if (result <= 4)
+		burst_time = 0;
+	else if (result < 8)
+		burst_time = result - 4;
+	else {
+		burst_time = result / 2;
+		if (burst_time > 18)
+			burst_time = 18;
+	}
+
+	return burst_time;
+}
+
+void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
+					      struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
+	u32 mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
+
+	mc_arb_burst_time &= ~(STATE1_MASK | STATE2_MASK | STATE3_MASK);
+
+	mc_arb_burst_time |= STATE1(cypress_calculate_burst_time(rdev,
+								 new_state->low.sclk,
+								 new_state->low.mclk));
+	mc_arb_burst_time |= STATE2(cypress_calculate_burst_time(rdev,
+								 new_state->medium.sclk,
+								 new_state->medium.mclk));
+	mc_arb_burst_time |= STATE3(cypress_calculate_burst_time(rdev,
+								 new_state->high.sclk,
+								 new_state->high.mclk));
+
+	rv730_program_memory_timing_parameters(rdev, radeon_new_state);
+
+	WREG32(MC_ARB_BURST_TIME, mc_arb_burst_time);
+}
+
+static void cypress_populate_mc_reg_addresses(struct radeon_device *rdev,
+					      SMC_Evergreen_MCRegisters *mc_reg_table)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < eg_pi->mc_reg_table.last; j++) {
+		if (eg_pi->mc_reg_table.valid_flag & (1 << j)) {
+			mc_reg_table->address[i].s0 =
+				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (u8)i;
+}
+
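+/*
+ * Build the table of MC register address pairs: s0 takes the _LP
+ * variant of each timing register where one exists, s1 the live
+ * register, both stored as dword offsets (hence the >> 2).
+ */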
+static void cypress_set_mc_reg_address_table(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 i = 0;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RAS_TIMING_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RAS_TIMING >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_CAS_TIMING_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_CAS_TIMING >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING2_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING2 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D0_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D0 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D1_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D1 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D0_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D0 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D1_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D1 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_EMRS >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS1 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC1 >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC1 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RESERVE_M >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RESERVE_M >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC3 >> 2;
+	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC3 >> 2;
+	i++;
+
+	eg_pi->mc_reg_table.last = (u8)i;
+}
+
+static void cypress_retrieve_ac_timing_for_one_entry(struct radeon_device *rdev,
+						     struct evergreen_mc_reg_entry *entry)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < eg_pi->mc_reg_table.last; i++)
+		entry->mc_data[i] =
+			RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
+}
+
+static void cypress_retrieve_ac_timing_for_all_ranges(struct radeon_device *rdev,
+						      struct atom_memory_clock_range_table *range_table)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 i, j;
+
+	for (i = 0; i < range_table->num_entries; i++) {
+		eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max =
+			range_table->mclk[i];
+		radeon_atom_set_ac_timing(rdev, range_table->mclk[i]);
+		cypress_retrieve_ac_timing_for_one_entry(rdev,
+							 &eg_pi->mc_reg_table.mc_reg_table_entry[i]);
+	}
+
+	eg_pi->mc_reg_table.num_entries = range_table->num_entries;
+	eg_pi->mc_reg_table.valid_flag = 0;
+
+	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
+		for (j = 1; j < range_table->num_entries; j++) {
+			if (eg_pi->mc_reg_table.mc_reg_table_entry[j-1].mc_data[i] !=
+			    eg_pi->mc_reg_table.mc_reg_table_entry[j].mc_data[i]) {
+				eg_pi->mc_reg_table.valid_flag |= (1 << i);
+				break;
+			}
+		}
+	}
+}
+
+static int cypress_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 module_index = rv770_get_memory_module_index(rdev);
+	struct atom_memory_clock_range_table range_table = { 0 };
+	int ret;
+
+	ret = radeon_atom_get_mclk_range_table(rdev,
+					       pi->mem_gddr5,
+					       module_index, &range_table);
+	if (ret)
+		return ret;
+
+	cypress_retrieve_ac_timing_for_all_ranges(rdev, &range_table);
+
+	return 0;
+}
+
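+/*
+ * Poll the MC sequencer on every channel (four on Cypress/Hemlock, one
+ * on Cedar, two otherwise) until MC_SEQ_CG reports the requested value
+ * or the usec timeout expires.
+ */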
+static void cypress_wait_for_mc_sequencer(struct radeon_device *rdev, u8 value)
+{
+	u32 i, j;
+	u32 channels = 2;
+
+	if ((rdev->family == CHIP_CYPRESS) ||
+	    (rdev->family == CHIP_HEMLOCK))
+		channels = 4;
+	else if (rdev->family == CHIP_CEDAR)
+		channels = 1;
+
+	for (i = 0; i < channels; i++) {
+		if ((rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK)) {
+			WREG32_P(MC_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
+			WREG32_P(MC_CG_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
+		} else {
+			WREG32_P(MC_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
+			WREG32_P(MC_CG_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
+		}
+		for (j = 0; j < rdev->usec_timeout; j++) {
+			if (((RREG32(MC_SEQ_CG) & CG_SEQ_RESP_MASK) >> CG_SEQ_RESP_SHIFT) == value)
+				break;
+			udelay(1);
+		}
+	}
+}
+
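+/*
+ * Force the MC onto the S1 register set: program the boot AC timing,
+ * wait for vblank on all CRTCs, suspend YCLK, request the S1 DRAM
+ * configuration and resume once PMG_PWRSTATE is observed.
+ */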
+static void cypress_force_mc_use_s1(struct radeon_device *rdev,
+				    struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
+	u32 strobe_mode;
+	u32 mc_seq_cg;
+	int i;
+
+	if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
+		return;
+
+	radeon_atom_set_ac_timing(rdev, boot_state->low.mclk);
+	radeon_mc_wait_for_idle(rdev);
+
+	if ((rdev->family == CHIP_CYPRESS) ||
+	    (rdev->family == CHIP_HEMLOCK)) {
+		WREG32(MC_CONFIG_MCD, 0xf);
+		WREG32(MC_CG_CONFIG_MCD, 0xf);
+	} else {
+		WREG32(MC_CONFIG, 0xf);
+		WREG32(MC_CG_CONFIG, 0xf);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++)
+		radeon_wait_for_vblank(rdev, i);
+
+	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
+	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
+
+	strobe_mode = cypress_get_strobe_mode_settings(rdev,
+						       boot_state->low.mclk);
+
+	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S1);
+	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
+	WREG32(MC_SEQ_CG, mc_seq_cg);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
+			break;
+		udelay(1);
+	}
+
+	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
+	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
+	WREG32(MC_SEQ_CG, mc_seq_cg);
+
+	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
+}
+
+static void cypress_copy_ac_timing_from_s1_to_s0(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 value;
+	u32 i;
+
+	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
+		value = RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
+		WREG32(eg_pi->mc_reg_table.mc_reg_address[i].s0 << 2, value);
+	}
+}
+
+static void cypress_force_mc_use_s0(struct radeon_device *rdev,
+				    struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
+	u32 strobe_mode;
+	u32 mc_seq_cg;
+	int i;
+
+	cypress_copy_ac_timing_from_s1_to_s0(rdev);
+	radeon_mc_wait_for_idle(rdev);
+
+	if ((rdev->family == CHIP_CYPRESS) ||
+	    (rdev->family == CHIP_HEMLOCK)) {
+		WREG32(MC_CONFIG_MCD, 0xf);
+		WREG32(MC_CG_CONFIG_MCD, 0xf);
+	} else {
+		WREG32(MC_CONFIG, 0xf);
+		WREG32(MC_CG_CONFIG, 0xf);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++)
+		radeon_wait_for_vblank(rdev, i);
+
+	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
+	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
+
+	strobe_mode = cypress_get_strobe_mode_settings(rdev,
+						       boot_state->low.mclk);
+
+	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S0);
+	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
+	WREG32(MC_SEQ_CG, mc_seq_cg);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (!(RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE))
+			break;
+		udelay(1);
+	}
+
+	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
+	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
+	WREG32(MC_SEQ_CG, mc_seq_cg);
+
+	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
+}
+
+static int cypress_populate_initial_mvdd_value(struct radeon_device *rdev,
+					       RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	voltage->index = eg_pi->mvdd_high_index;
+	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+
+	return 0;
+}
+
+int cypress_populate_smc_initial_state(struct radeon_device *rdev,
+				       struct radeon_ps *radeon_initial_state,
+				       RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_initial_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 a_t;
+
+	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
+	table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
+
+	table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk770.mclk_value =
+		cpu_to_be32(initial_state->low.mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->low.sclk);
+
+	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
+
+	table->initialState.levels[0].ACIndex = 0;
+
+	cypress_populate_voltage_value(rdev,
+				       &eg_pi->vddc_voltage_table,
+				       initial_state->low.vddc,
+				       &table->initialState.levels[0].vddc);
+
+	if (eg_pi->vddci_control)
+		cypress_populate_voltage_value(rdev,
+					       &eg_pi->vddci_voltage_table,
+					       initial_state->low.vddci,
+					       &table->initialState.levels[0].vddci);
+
+	cypress_populate_initial_mvdd_value(rdev,
+					    &table->initialState.levels[0].mvdd);
+
+	a_t = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(a_t);
+
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+
+	if (pi->boot_in_gen2)
+		table->initialState.levels[0].gen2PCIE = 1;
+	else
+		table->initialState.levels[0].gen2PCIE = 0;
+	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+		table->initialState.levels[0].gen2XSP = 1;
+	else
+		table->initialState.levels[0].gen2XSP = 0;
+
+	if (pi->mem_gddr5) {
+		table->initialState.levels[0].strobeMode =
+			cypress_get_strobe_mode_settings(rdev,
+							 initial_state->low.mclk);
+
+		if (initial_state->low.mclk > pi->mclk_edc_enable_threshold)
+			table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
+		else
+			table->initialState.levels[0].mcFlags = 0;
+	}
+
+	table->initialState.levels[1] = table->initialState.levels[0];
+	table->initialState.levels[2] = table->initialState.levels[0];
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	return 0;
+}
+
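+/*
+ * Build the ACPI (lowest power) state from the initial state: clear the
+ * DC flag, hold the MPLLs in reset/power-down with the memory DLLs
+ * bypassed, and (on evergreen) bypass the SPLL, reporting zero clocks.
+ */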
+int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
+				    RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 mpll_ad_func_cntl =
+		pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl =
+		pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 spll_func_cntl =
+		pi->clk_regs.rv770.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_3;
+	u32 mclk_pwrmgt_cntl =
+		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
+	u32 dll_cntl =
+		pi->clk_regs.rv770.dll_cntl;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		cypress_populate_voltage_value(rdev,
+					       &eg_pi->vddc_voltage_table,
+					       pi->acpi_vddc,
+					       &table->ACPIState.levels[0].vddc);
+		if (pi->pcie_gen2) {
+			if (pi->acpi_pcie_gen2)
+				table->ACPIState.levels[0].gen2PCIE = 1;
+			else
+				table->ACPIState.levels[0].gen2PCIE = 0;
+		} else {
+			table->ACPIState.levels[0].gen2PCIE = 0;
+		}
+		if (pi->acpi_pcie_gen2)
+			table->ACPIState.levels[0].gen2XSP = 1;
+		else
+			table->ACPIState.levels[0].gen2XSP = 0;
+	} else {
+		cypress_populate_voltage_value(rdev,
+					       &eg_pi->vddc_voltage_table,
+					       pi->min_vddc_in_table,
+					       &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE = 0;
+	}
+
+	if (eg_pi->acpi_vddci) {
+		if (eg_pi->vddci_control) {
+			cypress_populate_voltage_value(rdev,
+						       &eg_pi->vddci_voltage_table,
+						       eg_pi->acpi_vddci,
+						       &table->ACPIState.levels[0].vddci);
+		}
+	}
+
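+	/* for the ACPI state, power down the memory PLLs and hold the
+	 * memory read-clock DLLs in reset/bypass
+	 */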
+	mpll_ad_func_cntl &= ~PDNB;
+
+	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
+
+	if (pi->mem_gddr5)
+		mpll_dq_func_cntl &= ~PDNB;
+	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
+
+	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
+			     MRDCKA1_RESET |
+			     MRDCKB0_RESET |
+			     MRDCKB1_RESET |
+			     MRDCKC0_RESET |
+			     MRDCKC1_RESET |
+			     MRDCKD0_RESET |
+			     MRDCKD1_RESET);
+
+	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
+			      MRDCKA1_PDNB |
+			      MRDCKB0_PDNB |
+			      MRDCKB1_PDNB |
+			      MRDCKC0_PDNB |
+			      MRDCKC1_PDNB |
+			      MRDCKD0_PDNB |
+			      MRDCKD1_PDNB);
+
+	dll_cntl |= (MRDCKA0_BYPASS |
+		     MRDCKA1_BYPASS |
+		     MRDCKB0_BYPASS |
+		     MRDCKB1_BYPASS |
+		     MRDCKC0_BYPASS |
+		     MRDCKC1_BYPASS |
+		     MRDCKD0_BYPASS |
+		     MRDCKD1_BYPASS);
+
+	/* evergreen only */
+	if (rdev->family <= CHIP_HEMLOCK)
+		spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
+		cpu_to_be32(mpll_ad_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
+		cpu_to_be32(mpll_dq_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(spll_func_cntl_3);
+
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	if (eg_pi->dynamic_ac_timing)
+		table->ACPIState.levels[0].ACIndex = 1;
+
+	table->ACPIState.levels[1] = table->ACPIState.levels[0];
+	table->ACPIState.levels[2] = table->ACPIState.levels[0];
+
+	return 0;
+}
+
+static void cypress_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+							  struct atom_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= MAX_NO_VREG_STEPS)
+		return;
+
+	diff = voltage_table->count - MAX_NO_VREG_STEPS;
+
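+	/* drop the leading entries so at most MAX_NO_VREG_STEPS remain */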
+	for (i = 0; i < MAX_NO_VREG_STEPS; i++)
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+	voltage_table->count = MAX_NO_VREG_STEPS;
+}
+
+int cypress_construct_voltage_tables(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int ret;
+
+	ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0,
+					    &eg_pi->vddc_voltage_table);
+	if (ret)
+		return ret;
+
+	if (eg_pi->vddc_voltage_table.count > MAX_NO_VREG_STEPS)
+		cypress_trim_voltage_table_to_fit_state_table(rdev,
+							      &eg_pi->vddc_voltage_table);
+
+	if (eg_pi->vddci_control) {
+		ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0,
+						    &eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddci_voltage_table.count > MAX_NO_VREG_STEPS)
+			cypress_trim_voltage_table_to_fit_state_table(rdev,
+								      &eg_pi->vddci_voltage_table);
+	}
+
+	return 0;
+}
+
+static void cypress_populate_smc_voltage_table(struct radeon_device *rdev,
+					       struct atom_voltage_table *voltage_table,
+					       RV770_SMC_STATETABLE *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < voltage_table->count; i++) {
+		table->highSMIO[i] = 0;
+		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+	}
+}
+
+int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
+					RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	unsigned char i;
+
+	if (eg_pi->vddc_voltage_table.count) {
+		cypress_populate_smc_voltage_table(rdev,
+						   &eg_pi->vddc_voltage_table,
+						   table);
+
+		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
+		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
+			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
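+		/* note the first entry that covers the highest vddc used
+		 * by the power table
+		 */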
+		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+			if (pi->max_vddc_in_table <=
+			    eg_pi->vddc_voltage_table.entries[i].value) {
+				table->maxVDDCIndexInPPTable = i;
+				break;
+			}
+		}
+	}
+
+	if (eg_pi->vddci_voltage_table.count) {
+		cypress_populate_smc_voltage_table(rdev,
+						   &eg_pi->vddci_voltage_table,
+						   table);
+
+		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
+		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
+			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+	}
+
+	return 0;
+}
+
+static u32 cypress_get_mclk_split_point(struct atom_memory_info *memory_info)
+{
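+	/* clocks are in 10 kHz units, so 30000 is a 300 MHz split point */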
+	if ((memory_info->mem_type == MEM_TYPE_GDDR3) ||
+	    (memory_info->mem_type == MEM_TYPE_DDR3))
+		return 30000;
+
+	return 0;
+}
+
+int cypress_get_mvdd_configuration(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u8 module_index;
+	struct atom_memory_info memory_info;
+	u32 tmp = RREG32(GENERAL_PWRMGT);
+
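+	/* the backbias straps indicate which SMIO index drives the high
+	 * mvdd level; without them mvdd control is unusable
+	 */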
+	if (!(tmp & BACKBIAS_PAD_EN)) {
+		eg_pi->mvdd_high_index = 0;
+		eg_pi->mvdd_low_index = 1;
+		pi->mvdd_control = false;
+		return 0;
+	}
+
+	if (tmp & BACKBIAS_VALUE)
+		eg_pi->mvdd_high_index = 1;
+	else
+		eg_pi->mvdd_high_index = 0;
+
+	eg_pi->mvdd_low_index =
+		(eg_pi->mvdd_high_index == 0) ? 1 : 0;
+
+	module_index = rv770_get_memory_module_index(rdev);
+
+	if (radeon_atom_get_memory_info(rdev, module_index, &memory_info)) {
+		pi->mvdd_control = false;
+		return 0;
+	}
+
+	pi->mvdd_split_frequency =
+		cypress_get_mclk_split_point(&memory_info);
+
+	if (pi->mvdd_split_frequency == 0) {
+		pi->mvdd_control = false;
+		return 0;
+	}
+
+	return 0;
+}
+
+static int cypress_init_smc_table(struct radeon_device *rdev,
+				  struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
+	int ret;
+
+	memset(table, 0, sizeof(RV770_SMC_STATETABLE));
+
+	cypress_populate_smc_voltage_tables(rdev, table);
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	if (ret)
+		return ret;
+
+	ret = cypress_populate_smc_acpi_state(rdev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	return rv770_copy_bytes_to_smc(rdev,
+				       pi->state_table_start,
+				       (u8 *)table, sizeof(RV770_SMC_STATETABLE),
+				       pi->sram_end);
+}
+
+int cypress_populate_mc_reg_table(struct radeon_device *rdev,
+				  struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
+	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
+
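+	/* data[0] holds the boot timings and per-level entries start at
+	 * index 1, so point the SMC sequence index past the boot entry
+	 */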
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_seq_index, 1);
+
+	cypress_populate_mc_reg_addresses(rdev, &mc_reg_table);
+
+	cypress_convert_mc_reg_table_entry_to_smc(rdev,
+						  &boot_state->low,
+						  &mc_reg_table.data[0]);
+
+	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[0],
+				     &mc_reg_table.data[1], eg_pi->mc_reg_table.last,
+				     eg_pi->mc_reg_table.valid_flag);
+
+	cypress_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, &mc_reg_table);
+
+	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
+				       (u8 *)&mc_reg_table, sizeof(SMC_Evergreen_MCRegisters),
+				       pi->sram_end);
+}
+
+int cypress_get_table_locations(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
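+	/* the SMC firmware header stores the SRAM offsets of the state,
+	 * soft-register and MC register tables
+	 */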
+	ret = rv770_read_smc_sram_dword(rdev,
+					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
+					EVERGREEN_SMC_FIRMWARE_HEADER_stateTable,
+					&tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->state_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
+					EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters,
+					&tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->soft_regs_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
+					EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable,
+					&tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	eg_pi->mc_reg_table_start = (u16)tmp;
+
+	return 0;
+}
+
+void cypress_enable_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void cypress_program_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp, pipe;
+	int i;
+
+	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	if (rdev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	if (rdev->pm.dpm.new_active_crtc_count > 1)
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
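+	/* make sure the pipe driving DISP1 is one of the active crtcs */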
+	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+	if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
+	    (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+		/* find the first active crtc */
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.dpm.new_active_crtcs & (1 << i))
+				break;
+		}
+		if (i == rdev->num_crtc)
+			pipe = 0;
+		else
+			pipe = i;
+
+		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+	}
+
+	cypress_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+}
+
+void cypress_dpm_setup_asic(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	rv740_read_clock_registers(rdev);
+	rv770_read_voltage_smio_registers(rdev);
+	rv770_get_max_vddc(rdev);
+	rv770_get_memory_type(rdev);
+
+	if (eg_pi->pcie_performance_request) {
+		eg_pi->pcie_performance_request_registered = false;
+		cypress_advertise_gen2_capability(rdev);
+	}
+
+	rv770_get_pcie_gen2_status(rdev);
+
+	rv770_enable_acpi_pm(rdev);
+}
+
+int cypress_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	if (pi->gfx_clock_gating)
+		rv770_restore_cgcg(rdev);
+
+	if (rv770_dpm_enabled(rdev))
+		return -EINVAL;
+
+	if (pi->voltage_control) {
+		rv770_enable_voltage_control(rdev, true);
+		ret = cypress_construct_voltage_tables(rdev);
+		if (ret) {
+			DRM_ERROR("cypress_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+
+	if (pi->mvdd_control) {
+		ret = cypress_get_mvdd_configuration(rdev);
+		if (ret) {
+			DRM_ERROR("cypress_get_mvdd_configuration failed\n");
+			return ret;
+		}
+	}
+
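+	/* build the MC register table while the MC is forced onto the
+	 * S0 (boot) timing set, then switch back to S1
+	 */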
+	if (eg_pi->dynamic_ac_timing) {
+		cypress_set_mc_reg_address_table(rdev);
+		cypress_force_mc_use_s0(rdev, boot_ps);
+		ret = cypress_initialize_mc_reg_table(rdev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+		cypress_force_mc_use_s1(rdev, boot_ps);
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv770_enable_backbias(rdev, true);
+
+	if (pi->dynamic_ss)
+		cypress_enable_spread_spectrum(rdev, true);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, true);
+
+	rv770_setup_bsp(rdev);
+	rv770_program_git(rdev);
+	rv770_program_tp(rdev);
+	rv770_program_tpp(rdev);
+	rv770_program_sstp(rdev);
+	rv770_program_engine_speed_parameters(rdev);
+	cypress_enable_display_gap(rdev);
+	rv770_program_vc(rdev);
+
+	if (pi->dynamic_pcie_gen2)
+		cypress_enable_dynamic_pcie_gen2(rdev, true);
+
+	ret = rv770_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_upload_firmware failed\n");
+		return ret;
+	}
+
+	ret = cypress_get_table_locations(rdev);
+	if (ret) {
+		DRM_ERROR("cypress_get_table_locations failed\n");
+		return ret;
+	}
+	ret = cypress_init_smc_table(rdev, boot_ps);
+	if (ret) {
+		DRM_ERROR("cypress_init_smc_table failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = cypress_populate_mc_reg_table(rdev, boot_ps);
+		if (ret) {
+			DRM_ERROR("cypress_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+
+	cypress_program_response_times(rdev);
+
+	r7xx_start_smc(rdev);
+
+	ret = cypress_notify_smc_display_change(rdev, false);
+	if (ret) {
+		DRM_ERROR("cypress_notify_smc_display_change failed\n");
+		return ret;
+	}
+	cypress_enable_sclk_control(rdev, true);
+
+	if (eg_pi->memory_transition)
+		cypress_enable_mclk_control(rdev, true);
+
+	cypress_start_dpm(rdev);
+
+	if (pi->gfx_clock_gating)
+		cypress_gfx_clock_gating_enable(rdev, true);
+
+	if (pi->mg_clock_gating)
+		cypress_mg_clock_gating_enable(rdev, true);
+
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	return 0;
+}
+
+void cypress_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	if (!rv770_dpm_enabled(rdev))
+		return;
+
+	rv770_clear_vc(rdev);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, false);
+
+	if (pi->dynamic_pcie_gen2)
+		cypress_enable_dynamic_pcie_gen2(rdev, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	if (pi->gfx_clock_gating)
+		cypress_gfx_clock_gating_enable(rdev, false);
+
+	if (pi->mg_clock_gating)
+		cypress_mg_clock_gating_enable(rdev, false);
+
+	rv770_stop_dpm(rdev);
+	r7xx_stop_smc(rdev);
+
+	cypress_enable_spread_spectrum(rdev, false);
+
+	if (eg_pi->dynamic_ac_timing)
+		cypress_force_mc_use_s1(rdev, boot_ps);
+
+	rv770_reset_smio_status(rdev);
+}
+
+int cypress_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
+	int ret;
+
+	ret = rv770_restrict_performance_levels_before_switch(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	if (eg_pi->pcie_performance_request)
+		cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+
+	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	ret = rv770_halt_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_halt_smc failed\n");
+		return ret;
+	}
+	ret = cypress_upload_sw_state(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("cypress_upload_sw_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = cypress_upload_mc_reg_table(rdev, new_ps);
+		if (ret) {
+			DRM_ERROR("cypress_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+
+	cypress_program_memory_timing_parameters(rdev, new_ps);
+
+	ret = rv770_resume_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_resume_smc failed\n");
+		return ret;
+	}
+	ret = rv770_set_sw_state(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_set_sw_state failed\n");
+		return ret;
+	}
+	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	if (eg_pi->pcie_performance_request)
+		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+#if 0
+void cypress_dpm_reset_asic(struct radeon_device *rdev)
+{
+	rv770_restrict_performance_levels_before_switch(rdev);
+	rv770_set_boot_state(rdev);
+}
+#endif
+
+void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	cypress_program_display_gap(rdev);
+}
+
+int cypress_dpm_init(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
+	if (eg_pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = eg_pi;
+	pi = &eg_pi->rv7xx;
+
+	rv770_get_max_vddc(rdev);
+
+	eg_pi->ulv.supported = false;
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = rv7xx_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = dividers.ref_div + 1;
+	else
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	pi->mclk_edc_enable_threshold = 40000;
+	eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+	pi->rlp = RV770_RLP_DFLT;
+	pi->rmp = RV770_RMP_DFLT;
+	pi->lhp = RV770_LHP_DFLT;
+	pi->lmp = RV770_LMP_DFLT;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
+
+	pi->mvdd_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
+
+	eg_pi->vddci_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
+
+	rv770_get_engine_memory_ss(rdev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = CYPRESS_VRC_DFLT;
+
+	pi->power_gating = false;
+
+	if ((rdev->family == CHIP_CYPRESS) ||
+	    (rdev->family == CHIP_HEMLOCK))
+		pi->gfx_clock_gating = false;
+	else
+		pi->gfx_clock_gating = true;
+
+	pi->mg_clock_gating = true;
+	pi->mgcgtssm = true;
+	eg_pi->ls_clock_gating = false;
+	eg_pi->sclk_deep_sleep = false;
+
+	pi->dynamic_pcie_gen2 = true;
+
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	pi->display_gap = true;
+
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		pi->dcodt = true;
+	else
+		pi->dcodt = false;
+
+	pi->ulps = true;
+
+	eg_pi->dynamic_ac_timing = true;
+	eg_pi->abm = true;
+	eg_pi->mcls = true;
+	eg_pi->light_sleep = true;
+	eg_pi->memory_transition = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	if ((rdev->family == CHIP_CYPRESS) ||
+	    (rdev->family == CHIP_HEMLOCK) ||
+	    (rdev->family == CHIP_JUNIPER))
+		eg_pi->dll_default_on = true;
+	else
+		eg_pi->dll_default_on = false;
+
+	pi->mclk_stutter_mode_threshold = 0;
+
+	pi->sram_end = SMC_RAM_END;
+
+	return 0;
+}
+
+void cypress_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+}
+
+bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+	/* we never hit the non-gddr5 limit so disable it */
+	u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
+
+	return vblank_time < switch_limit;
+}
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.h b/drivers/gpu/drm/radeon/cypress_dpm.h
new file mode 100644
index 0000000..4c3f18c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CYPRESS_DPM_H__
+#define __CYPRESS_DPM_H__
+
+#include "rv770_dpm.h"
+#include "evergreen_smc.h"
+
+struct evergreen_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_ulv_param {
+	bool supported;
+	struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 mc_arb_rfsh_rate;
+	u32 mc_arb_burst_time;
+};
+
+struct at {
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+};
+
+struct evergreen_power_info {
+	/* must be first! */
+	struct rv7xx_power_info rv7xx;
+	/* flags */
+	bool vddci_control;
+	bool dynamic_ac_timing;
+	bool abm;
+	bool mcls;
+	bool light_sleep;
+	bool memory_transition;
+	bool pcie_performance_request;
+	bool pcie_performance_request_registered;
+	bool sclk_deep_sleep;
+	bool dll_default_on;
+	bool ls_clock_gating;
+	bool smu_uvd_hs;
+	bool uvd_enabled;
+	/* stored values */
+	u16 acpi_vddci;
+	u8 mvdd_high_index;
+	u8 mvdd_low_index;
+	u32 mclk_edc_wr_enable_threshold;
+	struct evergreen_mc_reg_table mc_reg_table;
+	struct atom_voltage_table vddc_voltage_table;
+	struct atom_voltage_table vddci_voltage_table;
+	struct evergreen_arb_registers bootup_arb_registers;
+	struct evergreen_ulv_param ulv;
+	struct at ats[2];
+	/* smc offsets */
+	u16 mc_reg_table_start;
+	struct radeon_ps current_rps;
+	struct rv7xx_ps current_ps;
+	struct radeon_ps requested_rps;
+	struct rv7xx_ps requested_ps;
+};
+
+#define CYPRESS_HASI_DFLT                               400000
+#define CYPRESS_MGCGTTLOCAL0_DFLT                       0x00000000
+#define CYPRESS_MGCGTTLOCAL1_DFLT                       0x00000000
+#define CYPRESS_MGCGTTLOCAL2_DFLT                       0x00000000
+#define CYPRESS_MGCGTTLOCAL3_DFLT                       0x00000000
+#define CYPRESS_MGCGCGTSSMCTRL_DFLT                     0x81944bc0
+#define REDWOOD_MGCGCGTSSMCTRL_DFLT                     0x6e944040
+#define CEDAR_MGCGCGTSSMCTRL_DFLT                       0x46944040
+#define CYPRESS_VRC_DFLT                                0xC00033
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY   0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER    1
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+
+int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
+				       struct rv7xx_pl *pl,
+				       RV770_SMC_HW_PERFORMANCE_LEVEL *level,
+				       u8 watermark_level);
+int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
+				    RV770_SMC_STATETABLE *table);
+int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
+					RV770_SMC_STATETABLE *table);
+int cypress_populate_smc_initial_state(struct radeon_device *rdev,
+				       struct radeon_ps *radeon_initial_state,
+				       RV770_SMC_STATETABLE *table);
+u32 cypress_calculate_burst_time(struct radeon_device *rdev,
+				 u32 engine_clock, u32 memory_clock);
+void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
+							  struct radeon_ps *radeon_new_state,
+							  struct radeon_ps *radeon_current_state);
+int cypress_upload_sw_state(struct radeon_device *rdev,
+			    struct radeon_ps *radeon_new_state);
+int cypress_upload_mc_reg_table(struct radeon_device *rdev,
+				struct radeon_ps *radeon_new_state);
+void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
+					      struct radeon_ps *radeon_new_state);
+void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+							 struct radeon_ps *radeon_new_state,
+							 struct radeon_ps *radeon_current_state);
+int cypress_construct_voltage_tables(struct radeon_device *rdev);
+int cypress_get_mvdd_configuration(struct radeon_device *rdev);
+void cypress_enable_spread_spectrum(struct radeon_device *rdev,
+				    bool enable);
+void cypress_enable_display_gap(struct radeon_device *rdev);
+int cypress_get_table_locations(struct radeon_device *rdev);
+int cypress_populate_mc_reg_table(struct radeon_device *rdev,
+				  struct radeon_ps *radeon_boot_state);
+void cypress_program_response_times(struct radeon_device *rdev);
+int cypress_notify_smc_display_change(struct radeon_device *rdev,
+				      bool has_display);
+void cypress_enable_sclk_control(struct radeon_device *rdev,
+				 bool enable);
+void cypress_enable_mclk_control(struct radeon_device *rdev,
+				 bool enable);
+void cypress_start_dpm(struct radeon_device *rdev);
+void cypress_advertise_gen2_capability(struct radeon_device *rdev);
+u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
+u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
+				    u32 memory_clock, bool strobe_mode);
+u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
new file mode 100644
index 0000000..cfa3a84
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2014 Rafał Miłecki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "r600d.h"
+
+void dce3_2_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+	u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	u32 tmp;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set HDMI mode */
+	tmp |= HDMI_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce3_2_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+	u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	u32 tmp;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set DP mode */
+	tmp |= DP_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder,
+	struct cea_sad *sads, int sad_count)
+{
+	int i;
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
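+	/* for each coding type, program the descriptor with the most channels */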
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					stereo_freqs |= sad->freq;
+				else
+					break;
+			}
+		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+		WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value);
+	}
+}
+
+void dce3_2_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock)
+{
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	unsigned int max_ratio = clock / 24000;
+	u32 dto_phase;
+	u32 wallclock_ratio;
+	u32 dto_cntl;
+
+	if (!crtc)
+		return;
+
+	radeon_encoder = to_radeon_encoder(crtc->encoder);
+	dig = radeon_encoder->enc_priv;
+
+	if (!dig)
+		return;
+
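+	/* larger pixel-clock-to-24MHz ratios need a bigger DTO phase and
+	 * wallclock divider
+	 */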
+	if (max_ratio >= 8) {
+		dto_phase = 192 * 1000;
+		wallclock_ratio = 3;
+	} else if (max_ratio >= 4) {
+		dto_phase = 96 * 1000;
+		wallclock_ratio = 2;
+	} else if (max_ratio >= 2) {
+		dto_phase = 48 * 1000;
+		wallclock_ratio = 1;
+	} else {
+		dto_phase = 24 * 1000;
+		wallclock_ratio = 0;
+	}
+
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	if (dig->dig_encoder == 0) {
+		dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+		dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+		WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+		WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+		WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+	} else {
+		dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+		dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+		WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+		WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+		WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+	}
+}
+
+void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
+		HDMI0_ACR_SOURCE |		/* select SW CTS value */
+		HDMI0_ACR_AUTO_SEND);	/* allow hw to send ACR packets when required */
+
+	WREG32_P(HDMI0_ACR_32_0 + offset,
+		HDMI0_ACR_CTS_32(acr->cts_32khz),
+		~HDMI0_ACR_CTS_32_MASK);
+	WREG32_P(HDMI0_ACR_32_1 + offset,
+		HDMI0_ACR_N_32(acr->n_32khz),
+		~HDMI0_ACR_N_32_MASK);
+
+	WREG32_P(HDMI0_ACR_44_0 + offset,
+		HDMI0_ACR_CTS_44(acr->cts_44_1khz),
+		~HDMI0_ACR_CTS_44_MASK);
+	WREG32_P(HDMI0_ACR_44_1 + offset,
+		HDMI0_ACR_N_44(acr->n_44_1khz),
+		~HDMI0_ACR_N_44_MASK);
+
+	WREG32_P(HDMI0_ACR_48_0 + offset,
+		HDMI0_ACR_CTS_48(acr->cts_48khz),
+		~HDMI0_ACR_CTS_48_MASK);
+	WREG32_P(HDMI0_ACR_48_1 + offset,
+		HDMI0_ACR_N_48(acr->n_48khz),
+		~HDMI0_ACR_N_48_MASK);
+}
+
+void dce3_2_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		HDMI0_AUDIO_DELAY_EN(1) |			/* default audio delay */
+		HDMI0_AUDIO_PACKETS_PER_LINE(3));	/* should be sufficient for all audio modes and small enough for all hblanks */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+		AFMT_AUDIO_SAMPLE_SEND |			/* send audio packets */
+		AFMT_60958_CS_UPDATE);				/* allow 60958 channel status fields to be updated */
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+		HDMI0_AUDIO_INFO_SEND |				/* enable audio info frames (frames won't be set until audio is enabled) */
+		HDMI0_AUDIO_INFO_CONT);				/* send audio info frames every frame/field */
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+		HDMI0_AUDIO_INFO_LINE(2));			/* anything other than 0 */
+}
+
+void dce3_2_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (mute)
+		WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
+	else
+		WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
+}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 0000000..367a916
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_audio.h"
+#include "sid.h"
+
+#define DCE8_DCCG_AUDIO_DTO1_PHASE	0x05b8
+#define DCE8_DCCG_AUDIO_DTO1_MODULE	0x05bc
+
+u32 dce6_endpoint_rreg(struct radeon_device *rdev,
+			      u32 block_offset, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->end_idx_lock, flags);
+	WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+	r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+	spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
+	return r;
+}
+
+void dce6_endpoint_wreg(struct radeon_device *rdev,
+			       u32 block_offset, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->end_idx_lock, flags);
+	if (ASIC_IS_DCE8(rdev))
+		WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+	else
+		WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
+		       AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
+	WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+	spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+}
+
+static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
+{
+	int i;
+	u32 offset, tmp;
+
+	for (i = 0; i < rdev->audio.num_pins; i++) {
+		offset = rdev->audio.pin[i].offset;
+		tmp = RREG32_ENDPOINT(offset,
+				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+			rdev->audio.pin[i].connected = false;
+		else
+			rdev->audio.pin[i].connected = true;
+	}
+}
+
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
+{
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	struct r600_audio_pin *pin = NULL;
+	int i, pin_count;
+
+	dce6_afmt_get_connected_pins(rdev);
+
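+	/* pick the first connected pin that no digital encoder is using yet */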
+	for (i = 0; i < rdev->audio.num_pins; i++) {
+		if (rdev->audio.pin[i].connected) {
+			pin = &rdev->audio.pin[i];
+			pin_count = 0;
+
+			list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+				if (radeon_encoder_is_digital(encoder)) {
+					radeon_encoder = to_radeon_encoder(encoder);
+					dig = radeon_encoder->enc_priv;
+					if (dig->pin == pin)
+						pin_count++;
+				}
+			}
+
+			if (pin_count == 0)
+				return pin;
+		}
+	}
+	if (!pin)
+		DRM_ERROR("No connected audio pins found!\n");
+	return pin;
+}
+
+void dce6_afmt_select_pin(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt || !dig->pin)
+		return;
+
+	WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
+	       AFMT_AUDIO_SRC_SELECT(dig->pin->id));
+}
+
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+				    struct drm_connector *connector,
+				    struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 tmp = 0;
+
+	if (!dig || !dig->afmt || !dig->pin)
+		return;
+
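+	/* latency index 0 covers progressive modes, index 1 interlaced */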
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
+	}
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
+void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+					     u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 tmp;
+
+	if (!dig || !dig->afmt || !dig->pin)
+		return;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(dig->pin->offset,
+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set HDMI mode */
+	tmp |= HDMI_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+					   u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 tmp;
+
+	if (!dig || !dig->afmt || !dig->pin)
+		return;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(dig->pin->offset,
+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set DP mode */
+	tmp |= DP_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+			      struct cea_sad *sads, int sad_count)
+{
+	int i;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	if (!dig || !dig->afmt || !dig->pin)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					stereo_freqs |= sad->freq;
+				else
+					break;
+			}
+		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+		WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
+	}
+}
+
+void dce6_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       u8 enable_mask)
+{
+	if (!pin)
+		return;
+
+	WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			enable_mask ? AUDIO_ENABLED : 0);
+}
+
+void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+			     struct radeon_crtc *crtc, unsigned int clock)
+{
+	/* Two dtos; generally use dto0 for HDMI */
+	u32 value = 0;
+
+	if (crtc)
+		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
+	WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+}
+
+void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+			   struct radeon_crtc *crtc, unsigned int clock)
+{
+	/* Two dtos; generally use dto1 for DP */
+	u32 value = DCCG_AUDIO_DTO_SEL;
+
+	if (crtc)
+		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	if (ASIC_IS_DCE8(rdev)) {
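+		/* on DCE8 the DP reference clock is divided down from the
+		 * display clock; fold that divider into the target clock
+		 */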
+		unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
+			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+		div = radeon_audio_decode_dfs_div(div);
+
+		if (div)
+			clock = clock * 100 / div;
+
+		WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+		WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+	} else {
+		WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+		WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 0000000..5712d63
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,5532 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include <drm/radeon_drm.h>
+#include "evergreend.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
+#include "radeon_ucode.h"
+
+#define DC_HPDx_CONTROL(x)        (DC_HPD1_CONTROL     + (x * 0xc))
+#define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + (x * 0xc))
+#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + (x * 0xc))
+
+/*
+ * Indirect registers accessor
+ */
+u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
+	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_CG_IND_DATA);
+	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
+	return r;
+}
+
+void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
+	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
+	WREG32(EVERGREEN_CG_IND_DATA, (v));
+	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
+}
+
+u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
+	return r;
+}
+
+void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
+}
+
+u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
+	return r;
+}
+
+void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
+}
+
+static const u32 crtc_offsets[6] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
+#include "clearstate_evergreen.h"
+
+static const u32 sumo_rlc_save_restore_register_list[] =
+{
+	0x98fc,
+	0x9830,
+	0x9834,
+	0x9838,
+	0x9870,
+	0x9874,
+	0x8a14,
+	0x8b24,
+	0x8bcc,
+	0x8b10,
+	0x8d00,
+	0x8d04,
+	0x8c00,
+	0x8c04,
+	0x8c08,
+	0x8c0c,
+	0x8d8c,
+	0x8c20,
+	0x8c24,
+	0x8c28,
+	0x8c18,
+	0x8c1c,
+	0x8cf0,
+	0x8e2c,
+	0x8e38,
+	0x8c30,
+	0x9508,
+	0x9688,
+	0x9608,
+	0x960c,
+	0x9610,
+	0x9614,
+	0x88c4,
+	0x88d4,
+	0xa008,
+	0x900c,
+	0x9100,
+	0x913c,
+	0x98f8,
+	0x98f4,
+	0x9b7c,
+	0x3f8c,
+	0x8950,
+	0x8954,
+	0x8a18,
+	0x8b28,
+	0x9144,
+	0x9148,
+	0x914c,
+	0x3f90,
+	0x3f94,
+	0x915c,
+	0x9160,
+	0x9178,
+	0x917c,
+	0x9180,
+	0x918c,
+	0x9190,
+	0x9194,
+	0x9198,
+	0x919c,
+	0x91a8,
+	0x91ac,
+	0x91b0,
+	0x91b4,
+	0x91b8,
+	0x91c4,
+	0x91c8,
+	0x91cc,
+	0x91d0,
+	0x91d4,
+	0x91e0,
+	0x91e4,
+	0x91ec,
+	0x91f0,
+	0x91f4,
+	0x9200,
+	0x9204,
+	0x929c,
+	0x9150,
+	0x802c,
+};
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+void evergreen_program_aspm(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
+extern void cayman_vm_decode_fault(struct radeon_device *rdev,
+				   u32 status, u32 addr);
+void cik_init_cp_pg_table(struct radeon_device *rdev);
+
+extern u32 si_get_csb_size(struct radeon_device *rdev);
+extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern u32 cik_get_csb_size(struct radeon_device *rdev);
+extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
+
+static const u32 evergreen_golden_registers[] =
+{
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x10830, 0xffffffff, 0x00000011,
+	0x11430, 0xffffffff, 0x00000011,
+	0x12030, 0xffffffff, 0x00000011,
+	0x12c30, 0xffffffff, 0x00000011,
+	0xd02c, 0xffffffff, 0x08421000,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x10c, 0x00000001, 0x00000001,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8cf0, 0xffffffff, 0x08e00620,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x28350, 0xffffffff, 0x00000000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x9508, 0xffffffff, 0x00000002,
+	0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+	0x2f4c, 0xffffffff, 0x00000000,
+	0x54f4, 0xffffffff, 0x00000000,
+	0x54f0, 0xffffffff, 0x00000000,
+	0x5498, 0xffffffff, 0x00000000,
+	0x549c, 0xffffffff, 0x00000000,
+	0x5494, 0xffffffff, 0x00000000,
+	0x53cc, 0xffffffff, 0x00000000,
+	0x53c8, 0xffffffff, 0x00000000,
+	0x53c4, 0xffffffff, 0x00000000,
+	0x53c0, 0xffffffff, 0x00000000,
+	0x53bc, 0xffffffff, 0x00000000,
+	0x53b8, 0xffffffff, 0x00000000,
+	0x53b4, 0xffffffff, 0x00000000,
+	0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0x40010000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000000,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x10830, 0xffffffff, 0x00000011,
+	0x11430, 0xffffffff, 0x00000011,
+	0xd02c, 0xffffffff, 0x08421000,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x10c, 0x00000001, 0x00000001,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8cf0, 0xffffffff, 0x08e00410,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x28350, 0xffffffff, 0x00000000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9178, 0xffffffff, 0x00050000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00010004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00010004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x918c, 0xffffffff, 0x00010006,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9150, 0xffffffff, 0x6e944040,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9178, 0xffffffff, 0x00070000,
+	0x9194, 0xffffffff, 0x00070000,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x9190, 0xffffffff, 0x00090008,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8030, 0xffffffff, 0x0000100a,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x913c, 0xffff000f, 0x0100000a,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0xd02c, 0xffffffff, 0x08421000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8cf0, 0x1fffffff, 0x08e00620,
+	0x28350, 0xffffffff, 0x00000000,
+	0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+	0x900c, 0x00ffffff, 0x0017071f,
+	0x8c18, 0xffffffff, 0x10101060,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x8c30, 0x0000000f, 0x00000005,
+	0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x918c, 0xffffffff, 0x00010006,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x9150, 0xffffffff, 0x6e944040,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9198, 0xffffffff, 0x00030002,
+	0x915c, 0xffffffff, 0x00010000,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9178, 0xffffffff, 0x00070000,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x9190, 0xffffffff, 0x00090008,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8030, 0xffffffff, 0x0000100a,
+	0x8a14, 0xffffffff, 0x00000001,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x913c, 0xffff000f, 0x0100000a,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0xd02c, 0xffffffff, 0x08421000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8cf0, 0x1fffffff, 0x08e00410,
+	0x28350, 0xffffffff, 0x00000000,
+	0x9508, 0xffffffff, 0x00000002,
+	0x900c, 0xffffffff, 0x0017071f,
+	0x8c18, 0xffffffff, 0x10101060,
+	0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x70073777, 0x00010001,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02011003,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02011003,
+	0x98fc, 0xffffffff, 0x76543210,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x00000007, 0x02011003,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00620,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x8c8, 0x00003000, 0x00001070,
+	0x8cc, 0x000fffff, 0x00040035,
+	0x3f90, 0xffff0000, 0xfff00000,
+	0x9148, 0xffff0000, 0xfff00000,
+	0x3f94, 0xffff0000, 0xfff00000,
+	0x914c, 0xffff0000, 0xfff00000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x00073007, 0x00010002,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02010002,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x00010002,
+	0x98fc, 0xffffffff, 0x33221100,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x00010002,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x8c8, 0x00003420, 0x00001450,
+	0x8cc, 0x000fffff, 0x00040035,
+	0x3f90, 0xffff0000, 0xfffc0000,
+	0x9148, 0xffff0000, 0xfffc0000,
+	0x3f94, 0xffff0000, 0xfffc0000,
+	0x914c, 0xffff0000, 0xfffc0000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x00073007, 0x00010001,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02010001,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02010001,
+	0x98fc, 0xffffffff, 0x33221100,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x02010001,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000001,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 cypress_mgcg_init,
+						 (const u32)ARRAY_SIZE(cypress_mgcg_init));
+		break;
+	case CHIP_JUNIPER:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 juniper_mgcg_init,
+						 (const u32)ARRAY_SIZE(juniper_mgcg_init));
+		break;
+	case CHIP_REDWOOD:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 redwood_mgcg_init,
+						 (const u32)ARRAY_SIZE(redwood_mgcg_init));
+		break;
+	case CHIP_CEDAR:
+		radeon_program_register_sequence(rdev,
+						 cedar_golden_registers,
+						 (const u32)ARRAY_SIZE(cedar_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 cedar_mgcg_init,
+						 (const u32)ARRAY_SIZE(cedar_mgcg_init));
+		break;
+	case CHIP_PALM:
+		radeon_program_register_sequence(rdev,
+						 wrestler_golden_registers,
+						 (const u32)ARRAY_SIZE(wrestler_golden_registers));
+		break;
+	case CHIP_SUMO:
+		radeon_program_register_sequence(rdev,
+						 supersumo_golden_registers,
+						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
+		break;
+	case CHIP_SUMO2:
+		radeon_program_register_sequence(rdev,
+						 supersumo_golden_registers,
+						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 sumo_golden_registers,
+						 (const u32)ARRAY_SIZE(sumo_golden_registers));
+		break;
+	case CHIP_BARTS:
+		radeon_program_register_sequence(rdev,
+						 barts_golden_registers,
+						 (const u32)ARRAY_SIZE(barts_golden_registers));
+		break;
+	case CHIP_TURKS:
+		radeon_program_register_sequence(rdev,
+						 turks_golden_registers,
+						 (const u32)ARRAY_SIZE(turks_golden_registers));
+		break;
+	case CHIP_CAICOS:
+		radeon_program_register_sequence(rdev,
+						 caicos_golden_registers,
+						 (const u32)ARRAY_SIZE(caicos_golden_registers));
+		break;
+	default:
+		break;
+	}
+}
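+
+/*
+ * Each golden-register table above is a flat array of (offset, and-mask,
+ * value) triples.  A minimal sketch of how such a sequence could be
+ * applied, assuming the shared radeon_program_register_sequence() helper
+ * does a masked read-modify-write per triple (illustrative only, not the
+ * driver's actual implementation):
+ */
+static inline void evergreen_program_triples_sketch(struct radeon_device *rdev,
+						    const u32 *table, u32 size)
+{
+	u32 i, reg, mask, val;
+
+	for (i = 0; i + 2 < size; i += 3) {
+		reg = table[i + 0];
+		mask = table[i + 1];
+		val = table[i + 2];
+		if (mask == 0xffffffff)
+			WREG32(reg, val); /* full mask: plain write */
+		else
+			WREG32(reg, (RREG32(reg) & ~mask) | (val & mask));
+	}
+}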
+
+/**
+ * evergreen_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ */
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+					u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case DMA_STATUS_REG:
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+			     unsigned *bankh, unsigned *mtaspect,
+			     unsigned *tile_split)
+{
+	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+	switch (*bankw) {
+	default:
+	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
+	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
+	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
+	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
+	}
+	switch (*bankh) {
+	default:
+	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
+	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
+	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
+	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
+	}
+	switch (*mtaspect) {
+	default:
+	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
+	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
+	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
+	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
+	}
+}
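+
+/*
+ * Sketch of the inverse packing for one field (illustrative; the other
+ * fields are analogous), using only the macros referenced above:
+ *
+ *	tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
+ *			RADEON_TILING_EG_BANKW_SHIFT;
+ */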
+
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+			      u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   clock, false, &dividers);
+	if (r)
+		return r;
+
+	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(status_reg) & DCLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	int r = 0;
+	u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0xffff0000;
+	cg_scratch |= vclk / 100; /* Mhz */
+
+	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0x0000ffff;
+	cg_scratch |= (dclk / 100) << 16; /* Mhz */
+
+done:
+	WREG32(CG_SCRATCH1, cg_scratch);
+
+	return r;
+}
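+
+/*
+ * CG_SCRATCH1 thus caches the last programmed UVD clocks in MHz: bits 15:0
+ * hold vclk / 100 and bits 31:16 hold dclk / 100.  Assuming the usual
+ * 10 kHz clock units, e.g. vclk == 54000 records 540 MHz in the low half.
+ */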
+
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	/* start off with something large */
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	/* set VCO_MODE to 1 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+	/* toggle UPLL_SLEEP to 1 then back to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+	/* deassert UPLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert UPLL_RESET again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+	if (fb_div < 307200)
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+	else
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+	int readrq;
+	u16 v;
+
+	readrq = pcie_get_readrq(rdev->pdev);
+	v = ffs(readrq) - 8;
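+	/*
+	 * readrq is a power-of-two byte count, so ffs(readrq) - 8 recovers
+	 * the PCIe MRRS field encoding (request size == 128 << v bytes);
+	 * e.g. readrq == 512 yields v == 2.
+	 */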
+	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value,
+	 * fix it to avoid hangs or performance issues
+	 */
+	if ((v == 0) || (v == 6) || (v == 7))
+		pcie_set_readrq(rdev->pdev, 512);
+}
+
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	return !!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) &
+		  EVERGREEN_CRTC_V_BLANK);
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	return pos1 != pos2;
+}
+
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
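+	/* now wait for the next vblank to start; bail out early if the
+	 * position counter has stopped moving (crtc effectively idle).
+	 */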
+	while (!dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip (latch on hsync instead of waiting for vblank)
+ *
+ * Triggers the actual pageflip by updating the primary
+ * surface base address (evergreen+).
+ */
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
+			 bool async)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	/* post the write */
+	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
+}
+
+/**
+ * evergreen_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Returns the current update pending status.
+ */
+bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* Return current update_pending status: */
+	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+		EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
+}
+
+/* get temperature in millidegrees */
+int evergreen_get_temp(struct radeon_device *rdev)
+{
+	u32 temp, toffset;
+	int actual_temp = 0;
+
+	if (rdev->family == CHIP_JUNIPER) {
+		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+			TOFFSET_SHIFT;
+		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+			TS0_ADC_DOUT_SHIFT;
+
+		if (toffset & 0x100)
+			actual_temp = temp / 2 - (0x200 - toffset);
+		else
+			actual_temp = temp / 2 + toffset;
+
+		actual_temp = actual_temp * 1000;
+
+	} else {
+		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+			ASIC_T_SHIFT;
+
+		if (temp & 0x400)
+			actual_temp = -256;
+		else if (temp & 0x200)
+			actual_temp = 255;
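+		/* bit 8 set: sign-extend the 9-bit two's-complement reading */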
+		else if (temp & 0x100) {
+			actual_temp = temp & 0x1ff;
+			actual_temp |= ~0x1ff;
+		} else
+			actual_temp = temp & 0xff;
+
+		actual_temp = (actual_temp * 1000) / 2;
+	}
+
+	return actual_temp;
+}
+
+int sumo_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+	int actual_temp = temp - 49;
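+	/* e.g. a raw reading of 96 reports (96 - 49) * 1000 == 47000 mdeg */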
+
+	return actual_temp * 1000;
+}
+
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+	/* low,mid sh/mh */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+	/* high sh/mh */
+	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
+/**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+	/* starting with BTC, there is one state that is used for both
+	 * MH and SH.  Difference is that we always use the high clock index for
+	 * mclk.
+	 */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
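+
+/*
+ * Net effect: every BTC profile uses the same power state and differs only
+ * in clock-mode index (0 = low, 1 = mid, 2 = high); per the comment above,
+ * SH and MH share a state, with the high clock index used for mclk.
+ */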
+
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if (voltage->type == VOLTAGE_SW) {
+		/* 0xff0x are flags rather than an actual voltage */
+		if ((voltage->voltage & 0xff00) == 0xff00)
+			return;
+		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+		}
+
+		/* starting with BTC, there is one state that is used for both
+		 * MH and SH.  Difference is that we always use the high clock index for
+		 * mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			voltage = &rdev->pm.power_state[req_ps_idx].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
+		/* 0xff0x are flags rather than an actual voltage */
+		if ((voltage->vddci & 0xff00) == 0xff00)
+			return;
+		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			rdev->pm.current_vddci = voltage->vddci;
+			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+		}
+	}
+}
+
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	if (hpd == RADEON_HPD_NONE)
+		return false;
+
+	return !!(RREG32(DC_HPDx_INT_STATUS_REG(hpd)) & DC_HPDx_SENSE);
+}
+
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd)
+{
+	bool connected = evergreen_hpd_sense(rdev, hpd);
+
+	if (hpd == RADEON_HPD_NONE)
+		return;
+
+	if (connected)
+		WREG32_AND(DC_HPDx_INT_CONTROL(hpd), ~DC_HPDx_INT_POLARITY);
+	else
+		WREG32_OR(DC_HPDx_INT_CONTROL(hpd), DC_HPDx_INT_POLARITY);
+}
+
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enabled = 0;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		enum radeon_hpd_id hpd =
+			to_radeon_connector(connector)->hpd.hpd;
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS; this avoids
+			 * breaking the aux dp channel on iMacs, helps (but does
+			 * not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143,
+			 * and also avoids interrupt storms during dpms.
+			 */
+			continue;
+		}
+
+		if (hpd == RADEON_HPD_NONE)
+			continue;
+
+		WREG32(DC_HPDx_CONTROL(hpd), tmp);
+		enabled |= 1 << hpd;
+
+		radeon_hpd_set_polarity(rdev, hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enabled);
+}
+
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disabled = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		enum radeon_hpd_id hpd =
+			to_radeon_connector(connector)->hpd.hpd;
+
+		if (hpd == RADEON_HPD_NONE)
+			continue;
+
+		WREG32(DC_HPDx_CONTROL(hpd), 0);
+		disabled |= 1 << hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disabled);
+}
+
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+					struct radeon_crtc *radeon_crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 2:0:
+	 * first display controller
+	 *  0 - first half of lb (3840 * 2)
+	 *  1 - first 3/4 of lb (5760 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
+	 *  3 - first 1/4 of lb (1920 * 2)
+	 * second display controller
+	 *  4 - second half of lb (3840 * 2)
+	 *  5 - second 3/4 of lb (5760 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
+	 *  7 - last 1/4 of lb (1920 * 2)
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	/* second controller of the pair uses second half of the lb */
+	if (radeon_crtc->crtc_id % 2)
+		tmp += 4;
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+		WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+		       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+			    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+				break;
+			udelay(1);
+		}
+	}
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	}
+}
+
+struct evergreen_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
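+
+/*
+ * Note on the fixed-point arithmetic above: fixed20_12 is 20.12 fixed
+ * point, so fractions are built from integer constants, e.g. 0.7 as
+ * dfixed_div(dfixed_const(7), dfixed_const(10)).  Worked example, assuming
+ * yclk is in kHz as documented in struct evergreen_wm_params: yclk ==
+ * 1000000 with two channels gives (1000000 / 1000) * (2 * 4) * 0.7 == 5600.
+ */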
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, disp_clk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = evergreen_available_bandwidth(wm);
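+	/* ns to return one 4 KB chunk (512 * 8 bytes), assuming bandwidth in MB/s */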
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	return evergreen_average_bandwidth(wm) <=
+		(evergreen_dram_bandwidth_for_display(wm) / wm->num_heads);
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	return evergreen_average_bandwidth(wm) <=
+		(evergreen_available_bandwidth(wm) / wm->num_heads);
+}
+
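+/*
+ * The line buffer holds lb_size / src_width lines.  With vertical
+ * scaling (vsc > 1), or too few partitions for the tap count, assume
+ * only one line of slack, otherwise two; the latency watermark must fit
+ * within that slack plus the blanking period.
+ */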
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (evergreen_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct evergreen_wm_params wm_low, wm_high;
+	u32 dram_channels;
+	u32 active_time;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 pipe_offset = radeon_crtc->crtc_id * 16;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+		dram_channels = evergreen_get_number_of_dram_channels(rdev);
+
+		/* watermark for high clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			wm_high.yclk =
+				radeon_dpm_get_mclk(rdev, false) * 10;
+			wm_high.sclk =
+				radeon_dpm_get_sclk(rdev, false) * 10;
+		} else {
+			wm_high.yclk = rdev->pm.current_mclk * 10;
+			wm_high.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_high.disp_clk = mode->clock;
+		wm_high.src_width = mode->crtc_hdisplay;
+		wm_high.active_time = active_time;
+		wm_high.blank_time = line_time - wm_high.active_time;
+		wm_high.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_high.interlaced = true;
+		wm_high.vsc = radeon_crtc->vsc;
+		wm_high.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_high.vtaps = 2;
+		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_high.lb_size = lb_size;
+		wm_high.dram_channels = dram_channels;
+		wm_high.num_heads = num_heads;
+
+		/* watermark for low clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			wm_low.yclk =
+				radeon_dpm_get_mclk(rdev, true) * 10;
+			wm_low.sclk =
+				radeon_dpm_get_sclk(rdev, true) * 10;
+		} else {
+			wm_low.yclk = rdev->pm.current_mclk * 10;
+			wm_low.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_low.disp_clk = mode->clock;
+		wm_low.src_width = mode->crtc_hdisplay;
+		wm_low.active_time = active_time;
+		wm_low.blank_time = line_time - wm_low.active_time;
+		wm_low.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_low.interlaced = true;
+		wm_low.vsc = radeon_crtc->vsc;
+		wm_low.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_low.vtaps = 2;
+		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_low.lb_size = lb_size;
+		wm_low.dram_channels = dram_channels;
+		wm_low.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
+		/* set for low clocks */
+		latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+		    !evergreen_check_latency_hiding(&wm_high) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority a to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+		}
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+		    !evergreen_check_latency_hiding(&wm_low) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority b to high\n");
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
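+		/* Convert the watermark (ns) into a priority mark: pixels
+		 * fetched during the latency window, i.e. latency_ns *
+		 * clock_kHz / 10^6, scaled by hsc, in units of 16 pixels.
+		 */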
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+	/* save values for DPM */
+	radeon_crtc->line_time = line_time;
+	radeon_crtc->wm_high = latency_watermark_a;
+	radeon_crtc->wm_low = latency_watermark_b;
+}
+
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
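+	/* the line buffer is shared between each pair of crtcs, so adjust
+	 * it against both modes before programming the watermarks */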
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* the MC busy bits live in SRBM_STATUS[12:8] */
+		tmp = RREG32(SRBM_STATUS) & 0x1F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+/*
+ * GART
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* poll the response until the flush completes or faults */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			pr_warn("[drm] evergreen flush TLB failed\n");
+			return;
+		}
+		if (tmp)
+			return;
+		udelay(1);
+	}
+}
+
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	if (rdev->flags & RADEON_IS_IGP) {
+		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+	} else {
+		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+		if ((rdev->family == CHIP_JUNIPER) ||
+		    (rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK) ||
+		    (rdev->family == CHIP_BARTS))
+			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	}
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	evergreen_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+	evergreen_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+static void evergreen_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+}
+
+static const unsigned ni_dig_offsets[] =
+{
+	NI_DIG0_REGISTER_OFFSET,
+	NI_DIG1_REGISTER_OFFSET,
+	NI_DIG2_REGISTER_OFFSET,
+	NI_DIG3_REGISTER_OFFSET,
+	NI_DIG4_REGISTER_OFFSET,
+	NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+	EVERGREEN_DP0_REGISTER_OFFSET,
+	EVERGREEN_DP1_REGISTER_OFFSET,
+	EVERGREEN_DP2_REGISTER_OFFSET,
+	EVERGREEN_DP3_REGISTER_OFFSET,
+	EVERGREEN_DP4_REGISTER_OFFSET,
+	EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+static const unsigned evergreen_disp_int_status[] =
+{
+	DISP_INTERRUPT_STATUS,
+	DISP_INTERRUPT_STATUS_CONTINUE,
+	DISP_INTERRUPT_STATUS_CONTINUE2,
+	DISP_INTERRUPT_STATUS_CONTINUE3,
+	DISP_INTERRUPT_STATUS_CONTINUE4,
+	DISP_INTERRUPT_STATUS_CONTINUE5
+};
+
+/*
+ * The assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the
+ * requested crtc.  We go from crtc to connector here, which is not
+ * reliable since it should really be the opposite direction.  If the
+ * crtc is enabled, find the dig_fe which selects this crtc and ensure
+ * that it is enabled; if such a dig_fe is found, find the dig_be which
+ * selects that dig_fe and ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled, the timing should be disconnected
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+					       unsigned crtc_id, unsigned *ret_dig_fe)
+{
+	unsigned i;
+	unsigned dig_fe;
+	unsigned dig_be;
+	unsigned dig_en_be;
+	unsigned uniphy_pll;
+	unsigned digs_fe_selected;
+	unsigned dig_be_mode;
+	unsigned dig_fe_mask;
+	bool is_enabled = false;
+	bool found_crtc = false;
+
+	/* loop through all running dig_fe to find selected crtc */
+	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+			/* found running pipe */
+			found_crtc = true;
+			dig_fe_mask = 1 << i;
+			dig_fe = i;
+			break;
+		}
+	}
+
+	if (found_crtc) {
+		/* loop through all running dig_be to find selected dig_fe */
+		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* is this dig_fe selected by this dig_be? */
+			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+			if (dig_fe_mask & digs_fe_selected &&
+			    /* is this dig_be in sst mode? */
+			    dig_be_mode == NI_DIG_BE_DPSST) {
+				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+						   ni_dig_offsets[i]);
+				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+						    ni_tx_offsets[i]);
+				/* dig_be enabled and tx running */
+				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+					is_enabled = true;
+					*ret_dig_fe = dig_fe;
+					break;
+				}
+			}
+		}
+	}
+
+	return is_enabled;
+}
+
+/*
+ * Blank the dig when in dp sst mode; the dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+				      unsigned dig_fe)
+{
+	unsigned stream_ctrl;
+	unsigned fifo_ctrl;
+	unsigned counter = 0;
+
+	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+		DRM_ERROR("invalid dig_fe %u\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %u should be enabled\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+	       evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+		msleep(1);
+		counter++;
+		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+				     evergreen_dp_offsets[dig_fe]);
+	}
+	if (counter >= 32)
+		DRM_ERROR("counter exceeds %u\n", counter);
+
+	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+}
+
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+	unsigned dig_fe;
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+		save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+		/* disable VGA render */
+		WREG32(VGA_RENDER_CONTROL, 0);
+	}
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				}
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				}
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+			/*
+			 * We should disable the dig if it drives dp sst, but
+			 * we are in radeon_device_init and the topology is
+			 * not known yet; it only becomes available after
+			 * radeon_modeset_init. radeon_atom_encoder_dpms_dig
+			 * would do the job if initialized properly, so for
+			 * now do it manually.
+			 */
+			if (ASIC_IS_DCE5(rdev) &&
+			    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+				evergreen_blank_dp_output(rdev, dig_fe);
+			/* we could remove the 6 lines below */
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+			/* ***** */
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout &= ~BLACKOUT_MODE_MASK;
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+	}
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x7) != 0) {
+				tmp &= ~0x7;
+				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp &= ~BLACKOUT_MODE_MASK;
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* Unlock vga access */
+		WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+		mdelay(1);
+		WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+	}
+}
+
+void evergreen_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lock out access through the VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	/* llano/ontario only */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+	}
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it from overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	if (ring->rptr_save_reg) {
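+		/* rptr will land here: 3 dwords for this SET_CONFIG_REG
+		 * packet plus 4 for the INDIRECT_BUFFER packet below */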
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
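+		/* rptr will land here: 5 dwords for this MEM_WRITE packet
+		 * plus 4 for the INDIRECT_BUFFER packet below */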
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	cp_me = 0xff;
+	WREG32(CP_ME_CNTL, cp_me);
+
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < evergreen_default_size; i++)
+		radeon_ring_write(ring, evergreen_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	return 0;
+}
+
+static int evergreen_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	/* Set ring buffer size */
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	evergreen_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+	return 0;
+}
+
+/*
+ * Core functions
+ */
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 sq_config;
+	u32 sq_lds_resource_mgmt;
+	u32 sq_gpr_resource_mgmt_1;
+	u32 sq_gpr_resource_mgmt_2;
+	u32 sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt;
+	u32 sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1;
+	u32 sq_stack_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_3;
+	u32 vgt_cache_invalidation;
+	u32 hdp_host_path_cntl, tmp;
+	u32 disabled_rb_mask;
+	int i, j, ps_thread_count;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_JUNIPER:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_REDWOOD:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CEDAR:
+	default:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PALM:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		if (rdev->pdev->device == 0x9648)
+			rdev->config.evergreen.max_simds = 3;
+		else if ((rdev->pdev->device == 0x9647) ||
+			 (rdev->pdev->device == 0x964a))
+			rdev->config.evergreen.max_simds = 4;
+		else
+			rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO2:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_BARTS:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 7;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_TURKS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 6;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CAICOS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
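+	/* e.g. (illustrative): 8 pipes (code 3), 8 banks (code 1), group
+	 * size code 0 and row size code 2 would give tile_config = 0x2013 */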
+	rdev->config.evergreen.tile_config = 0;
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.evergreen.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.evergreen.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.evergreen.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.evergreen.tile_config |= (3 << 0);
+		break;
+	}
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.evergreen.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.evergreen.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.evergreen.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.evergreen.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.evergreen.tile_config |= 0 << 8;
+	rdev->config.evergreen.tile_config |=
+		((gb_addr_config & 0x30000000) >> 28) << 12;
+
+	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+
+		efuse_straps_4 = RREG32_RCU(0x204);
+		efuse_straps_3 = RREG32_RCU(0x203);
+		tmp = (((efuse_straps_4 & 0xf) << 4) |
+		      ((efuse_straps_3 & 0xf0000000) >> 28));
+	} else {
+		tmp = 0;
+		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+			u32 rb_disable_bitmap;
+
+			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+			tmp <<= 4;
+			tmp |= rb_disable_bitmap;
+		}
+	}
+	/* enabled rbs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+	tmp = 0;
+	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
+
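+	/* tmp accumulates a 16-bit SIMD disable bitmap per SE; SIMDs beyond
+	 * max_simds are marked disabled as well */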
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.evergreen.active_simds = hweight32(~tmp);
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+	if ((rdev->config.evergreen.max_backends == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	rdev->config.evergreen.backend_map = tmp;
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+			     SYNC_GRADIENT |
+			     SYNC_WALKER |
+			     SYNC_ALIGNER));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family <= CHIP_SUMO2)
+		WREG32(SMX_SAR_CTL0, 0x00010000);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(SPI_CONFIG_CNTL, 0);
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
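+	/* GPR pool split (reading off the arithmetic below): reserve 4 * 2
+	 * GPRs for clause temporaries, then give PS 12/32, VS 6/32, GS and
+	 * ES 4/32 each, HS and LS 3/32 each of the remainder */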
+	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		ps_thread_count = 96;
+		break;
+	default:
+		ps_thread_count = 128;
+		break;
+	}
+
+	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+		break;
+	default:
+		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+		break;
+	}
+	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR8_BASE, 0);
+	WREG32(CB_COLOR9_BASE, 0);
+	WREG32(CB_COLOR10_BASE, 0);
+	WREG32(CB_COLOR11_BASE, 0);
+
+	/* set the shader const cache sizes to 0 */
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+		WREG32(i, 0);
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+		WREG32(i, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		tmp = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		/* size in bytes on fusion */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	} else {
+		/* size in MB on evergreen/cayman/tn */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	}
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
+{
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
+		RREG32(SRBM_STATUS2));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+	if (rdev->family >= CHIP_CAYMAN) {
+		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
+			 RREG32(DMA_STATUS_REG + 0x800));
+	}
+}
+
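+/**
+ * evergreen_is_display_hung - check if the display engine is hung
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Sample the HV counters of all enabled CRTCs and re-check them up to
+ * ten times, 100us apart.  A CRTC whose counter shows movement is
+ * removed from the hung set.
+ * Returns true if at least one enabled CRTC's counter never changed,
+ * false otherwise.
+ */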
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+	u32 crtc_hung = 0;
+	u32 crtc_status[6];
+	u32 i, j, tmp;
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+			crtc_hung |= (1 << i);
+		}
+	}
+
+	for (j = 0; j < 10; j++) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (crtc_hung & (1 << i)) {
+				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+				if (tmp != crtc_status[i])
+					crtc_hung &= ~(1 << i);
+			}
+		}
+		if (crtc_hung == 0)
+			return false;
+		udelay(100);
+	}
+
+	return true;
+}
+
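+/**
+ * evergreen_gpu_check_soft_reset - determine which blocks need a soft reset
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Inspect the GRBM, SRBM, DMA and VM status registers and build a mask
+ * of RADEON_RESET_* flags for the blocks that appear busy or hung.  An
+ * MC that is merely busy is deliberately dropped from the mask.
+ * Returns the reset mask (0 if the GPU looks idle).
+ */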
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   SPI_BUSY | VGT_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG */
+	tmp = RREG32(DMA_STATUS_REG);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
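+/**
+ * evergreen_gpu_soft_reset - soft reset selected GPU blocks
+ *
+ * @rdev: radeon_device pointer
+ * @reset_mask: mask of RADEON_RESET_* flags selecting the blocks to reset
+ *
+ * Halt the CP and DMA engines, stop the memory controller, pulse the
+ * matching GRBM/SRBM soft reset bits and resume the memory controller.
+ */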
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* Disable DMA */
+		tmp = RREG32(DMA_RB_CNTL);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset |= SOFT_RESET_DB |
+			SOFT_RESET_CB |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_SH |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VC |
+			SOFT_RESET_VGT;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP |
+			SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
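+/**
+ * evergreen_gpu_pci_config_reset - reset the GPU through PCI config space
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Quiesce the CP, DMA and RLC engines, switch the clocks to bypass
+ * mode, disable bus mastering and memory access, then trigger a PCI
+ * config reset and wait for the ASIC to come back.
+ */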
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+	udelay(50);
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc */
+	r600_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* set mclk/sclk to bypass */
+	rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
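+/**
+ * evergreen_asic_reset - reset the ASIC
+ *
+ * @rdev: radeon_device pointer
+ * @hard: perform a PCI config reset instead of trying a soft reset first
+ *
+ * Try a soft reset of the blocks that look hung; if that is not enough
+ * and hard resets are allowed, fall back to a PCI config reset.
+ * Returns 0 for success.
+ */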
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	u32 reset_mask;
+
+	if (hard) {
+		evergreen_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	/* try soft reset */
+	evergreen_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		evergreen_gpu_pci_config_reset(rdev);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/*
+ * RLC
+ */
+#define RLC_SAVE_RESTORE_LIST_END_MARKER    0x00000000
+#define RLC_CLEAR_STATE_END_MARKER          0x00000001
+
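+/**
+ * sumo_rlc_fini - tear down the RLC buffer objects
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin and free the save/restore, clear state and CP table buffer
+ * objects allocated by sumo_rlc_init().
+ */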
+void sumo_rlc_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	/* save restore block */
+	if (rdev->rlc.save_restore_obj) {
+		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.save_restore_obj);
+		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+
+		radeon_bo_unref(&rdev->rlc.save_restore_obj);
+		rdev->rlc.save_restore_obj = NULL;
+	}
+
+	/* clear state block */
+	if (rdev->rlc.clear_state_obj) {
+		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.clear_state_obj);
+		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+
+		radeon_bo_unref(&rdev->rlc.clear_state_obj);
+		rdev->rlc.clear_state_obj = NULL;
+	}
+
+	/* cp table block */
+	if (rdev->rlc.cp_table_obj) {
+		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.cp_table_obj);
+		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+		radeon_bo_unref(&rdev->rlc.cp_table_obj);
+		rdev->rlc.cp_table_obj = NULL;
+	}
+}
+
+#define CP_ME_TABLE_SIZE    96
+
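+/**
+ * sumo_rlc_init - allocate and fill the RLC buffer objects
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate, pin and fill the save/restore and clear state buffers in
+ * VRAM (and the CP table on asics that need it) using the register
+ * lists provided by the asic-specific code.
+ * Returns 0 for success, a negative error code on failure.
+ */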
+int sumo_rlc_init(struct radeon_device *rdev)
+{
+	const u32 *src_ptr;
+	volatile u32 *dst_ptr;
+	u32 dws, data, i, j, k, reg_num;
+	u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
+	u64 reg_list_mc_addr;
+	const struct cs_section_def *cs_data;
+	int r;
+
+	src_ptr = rdev->rlc.reg_list;
+	dws = rdev->rlc.reg_list_size;
+	if (rdev->family >= CHIP_BONAIRE) {
+		dws += (5 * 16) + 48 + 48 + 64;
+	}
+	cs_data = rdev->rlc.cs_data;
+
+	if (src_ptr) {
+		/* save restore block */
+		if (rdev->rlc.save_restore_obj == NULL) {
+			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     NULL, &rdev->rlc.save_restore_obj);
+			if (r) {
+				dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+				return r;
+			}
+		}
+
+		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+		if (unlikely(r != 0)) {
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->rlc.save_restore_gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+			dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+
+		r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		/* write the sr buffer */
+		dst_ptr = rdev->rlc.sr_ptr;
+		if (rdev->family >= CHIP_TAHITI) {
+			/* SI */
+			for (i = 0; i < rdev->rlc.reg_list_size; i++)
+				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+		} else {
+			/* ON/LN/TN */
+			/* format:
+			 * dw0: (reg2 << 16) | reg1
+			 * dw1: reg1 save space
+			 * dw2: reg2 save space
+			 */
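+			/* e.g. registers 0x8000 and 0x8004 pack into
+			 * dw0 = (0x2001 << 16) | 0x2000 = 0x20012000,
+			 * since offsets are stored shifted right by 2
+			 * (illustrative register offsets)
+			 */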
+			for (i = 0; i < dws; i++) {
+				data = src_ptr[i] >> 2;
+				i++;
+				if (i < dws)
+					data |= (src_ptr[i] >> 2) << 16;
+				j = (((i - 1) * 3) / 2);
+				dst_ptr[j] = cpu_to_le32(data);
+			}
+			j = ((i * 3) / 2);
+			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
+		}
+		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
+		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+	}
+
+	if (cs_data) {
+		/* clear state block */
+		if (rdev->family >= CHIP_BONAIRE) {
+			rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
+		} else if (rdev->family >= CHIP_TAHITI) {
+			rdev->rlc.clear_state_size = si_get_csb_size(rdev);
+			dws = rdev->rlc.clear_state_size + (256 / 4);
+		} else {
+			reg_list_num = 0;
+			dws = 0;
+			for (i = 0; cs_data[i].section != NULL; i++) {
+				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+					reg_list_num++;
+					dws += cs_data[i].section[j].reg_count;
+				}
+			}
+			reg_list_blk_index = (3 * reg_list_num + 2);
+			dws += reg_list_blk_index;
+			rdev->rlc.clear_state_size = dws;
+		}
+
+		if (rdev->rlc.clear_state_obj == NULL) {
+			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     NULL, &rdev->rlc.clear_state_obj);
+			if (r) {
+				dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+				sumo_rlc_fini(rdev);
+				return r;
+			}
+		}
+		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+		if (unlikely(r != 0)) {
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->rlc.clear_state_gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+			dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+
+		r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		/* set up the cs buffer */
+		dst_ptr = rdev->rlc.cs_ptr;
+		if (rdev->family >= CHIP_BONAIRE) {
+			cik_get_csb_buffer(rdev, dst_ptr);
+		} else if (rdev->family >= CHIP_TAHITI) {
+			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
+			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
+			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
+		} else {
+			reg_list_hdr_blk_index = 0;
+			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
+			data = upper_32_bits(reg_list_mc_addr);
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
+			reg_list_hdr_blk_index++;
+			for (i = 0; cs_data[i].section != NULL; i++) {
+				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+					reg_num = cs_data[i].section[j].reg_count;
+					data = reg_list_mc_addr & 0xffffffff;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
+					reg_list_hdr_blk_index++;
+
+					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
+					reg_list_hdr_blk_index++;
+
+					data = 0x08000000 | (reg_num * 4);
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
+					reg_list_hdr_blk_index++;
+
+					for (k = 0; k < reg_num; k++) {
+						data = cs_data[i].section[j].extent[k];
+						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
+					}
+					reg_list_mc_addr += reg_num * 4;
+					reg_list_blk_index += reg_num;
+				}
+			}
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
+		}
+		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
+		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+	}
+
+	if (rdev->rlc.cp_table_size) {
+		if (rdev->rlc.cp_table_obj == NULL) {
+			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+					     PAGE_SIZE, true,
+					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     NULL, &rdev->rlc.cp_table_obj);
+			if (r) {
+				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+				sumo_rlc_fini(rdev);
+				return r;
+			}
+		}
+
+		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+		if (unlikely(r != 0)) {
+			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->rlc.cp_table_gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+			dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+			sumo_rlc_fini(rdev);
+			return r;
+		}
+
+		cik_init_cp_pg_table(rdev);
+
+		radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+	}
+
+	return 0;
+}
+
+static void evergreen_rlc_start(struct radeon_device *rdev)
+{
+	u32 mask = RLC_ENABLE;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
+	}
+
+	WREG32(RLC_CNTL, mask);
+}
+
+int evergreen_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	r600_rlc_stop(rdev);
+
+	WREG32(RLC_HB_CNTL, 0);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_ARUBA) {
+			u32 always_on_bitmap =
+				3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
+			/* find out the number of active simds */
+			u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+			tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
+			tmp = hweight32(~tmp);
+			if (tmp == rdev->config.cayman.max_simds_per_se) {
+				WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
+				WREG32(TN_RLC_LB_PARAMS, 0x00601004);
+				WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
+				WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
+				WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
+			}
+		} else {
+			WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+			WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+		}
+		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+	} else {
+		WREG32(RLC_HB_BASE, 0);
+		WREG32(RLC_HB_RPTR, 0);
+		WREG32(RLC_HB_WPTR, 0);
+		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+	}
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	if (rdev->family >= CHIP_ARUBA) {
+		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_CAYMAN) {
+		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else {
+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	evergreen_rlc_start(rdev);
+
+	return 0;
+}
+
+/* Interrupts */
+
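+/**
+ * evergreen_get_vblank_counter - read the per-CRTC frame counter
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to read the counter for
+ *
+ * Returns the hardware frame count of the requested CRTC, or 0 if the
+ * crtc index is out of range.
+ */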
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc >= rdev->num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+	int i;
+	u32 tmp;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0,
+					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+		cayman_cp_int_cntl_setup(rdev, 1, 0);
+		cayman_cp_int_cntl_setup(rdev, 2, 0);
+		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		WREG32(CAYMAN_DMA1_CNTL, tmp);
+	} else
+		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(INT_MASK + crtc_offsets[i], 0);
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);
+
+	/* only one DAC on DCE5 */
+	if (!ASIC_IS_DCE5(rdev))
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+	for (i = 0; i < 6; i++)
+		WREG32_AND(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_POLARITY);
+}
+
+/* Note that the order we write back regs here is important */
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+	int i;
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 dma_cntl, dma_cntl1 = 0;
+	u32 thermal_int = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		evergreen_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	if (rdev->family == CHIP_ARUBA)
+		thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	else
+		thermal_int = RREG32(CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* enable CP interrupts on all rings */
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+		}
+	} else {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= RB_INT_ENABLE;
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			dma_cntl1 |= TRAP_ENABLE;
+		}
+	}
+
+	if (rdev->irq.dpm_thermal) {
+		DRM_DEBUG("dpm thermal\n");
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+	} else
+		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_CAYMAN)
+		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		radeon_irq_kms_set_irq_n_enabled(
+		    rdev, INT_MASK + crtc_offsets[i],
+		    VBLANK_INT_MASK,
+		    rdev->irq.crtc_vblank_int[i] ||
+		    atomic_read(&rdev->irq.pflip[i]), "vblank", i);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);
+
+	for (i = 0; i < 6; i++) {
+		radeon_irq_kms_set_irq_n_enabled(
+		    rdev, DC_HPDx_INT_CONTROL(i),
+		    DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
+		    rdev->irq.hpd[i], "HPD", i);
+	}
+
+	if (rdev->family == CHIP_ARUBA)
+		WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
+	else
+		WREG32(CG_THERMAL_INT, thermal_int);
+
+	for (i = 0; i < 6; i++) {
+		radeon_irq_kms_set_irq_n_enabled(
+		    rdev, AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
+		    AFMT_AZ_FORMAT_WTRIG_MASK,
+		    rdev->irq.afmt[i], "HDMI", i);
+	}
+
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
+	return 0;
+}
+
+/* Note that the order we write back regs here is important */
+static void evergreen_irq_ack(struct radeon_device *rdev)
+{
+	int i, j;
+	u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
+	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
+	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
+
+	for (i = 0; i < 6; i++) {
+		disp_int[i] = RREG32(evergreen_disp_int_status[i]);
+		afmt_status[i] = RREG32(AFMT_STATUS + crtc_offsets[i]);
+		if (i < rdev->num_crtc)
+			grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
+	}
+
+	/* We write back the interrupt status registers in pairs of two CRTCs */
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		for (j = i; j < (i + 2); j++) {
+			if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
+				WREG32(GRPH_INT_STATUS + crtc_offsets[j],
+				       GRPH_PFLIP_INT_CLEAR);
+		}
+
+		for (j = i; j < (i + 2); j++) {
+			if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
+				WREG32(VBLANK_STATUS + crtc_offsets[j],
+				       VBLANK_ACK);
+			if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
+				WREG32(VLINE_STATUS + crtc_offsets[j],
+				       VLINE_ACK);
+		}
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (disp_int[i] & DC_HPD1_INTERRUPT)
+			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
+			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (afmt_status[i] & AFMT_AZ_FORMAT_WTRIG)
+			WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
+				  AFMT_AZ_FORMAT_WTRIG_ACK);
+	}
+}
+
+static void evergreen_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	evergreen_irq_ack(rdev);
+	evergreen_disable_interrupt_state(rdev);
+}
+
+void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+	evergreen_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
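+/**
+ * evergreen_get_ih_wptr - fetch the current IH ring write pointer
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Read the write pointer from the writeback page if enabled, otherwise
+ * from the IH_RB_WPTR register.  On overflow, move the read pointer to
+ * the oldest vector that was not overwritten and clear the overflow
+ * bit.
+ * Returns the masked write pointer.
+ */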
+static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
+		/* When a ring buffer overflow happens, start parsing
+		 * interrupts from the last vector that was not overwritten
+		 * (wptr + 16). Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
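+/**
+ * evergreen_irq_process - drain and dispatch the IH ring
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Walk the IH ring from rptr to wptr and dispatch each event.  Ring
+ * vectors are 16 bytes; dword 0 carries the source id and dword 1 the
+ * source data, as decoded in the loop below.
+ * Returns IRQ_HANDLED after draining the ring, or IRQ_NONE if
+ * interrupts are disabled or another thread holds the IH lock.
+ */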
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
+	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
+	u32 crtc_idx, hpd_idx, afmt_idx;
+	u32 mask;
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+	bool queue_dp = false;
+	bool queue_thermal = false;
+	u32 status, addr;
+	const char *event_name;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	evergreen_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+		case 2: /* D2 vblank/vline */
+		case 3: /* D3 vblank/vline */
+		case 4: /* D4 vblank/vline */
+		case 5: /* D5 vblank/vline */
+		case 6: /* D6 vblank/vline */
+			crtc_idx = src_id - 1;
+
+			if (src_data == 0) { /* vblank */
+				mask = LB_D1_VBLANK_INTERRUPT;
+				event_name = "vblank";
+
+				if (rdev->irq.crtc_vblank_int[crtc_idx]) {
+					drm_handle_vblank(rdev->ddev, crtc_idx);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
+					radeon_crtc_handle_vblank(rdev,
+								  crtc_idx);
+				}
+
+			} else if (src_data == 1) { /* vline */
+				mask = LB_D1_VLINE_INTERRUPT;
+				event_name = "vline";
+			} else {
+				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+					  src_id, src_data);
+				break;
+			}
+
+			if (!(disp_int[crtc_idx] & mask)) {
+				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
+					  crtc_idx + 1, event_name);
+			}
+
+			disp_int[crtc_idx] &= ~mask;
+			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
+
+			break;
+		case 8: /* D1 page flip */
+		case 10: /* D2 page flip */
+		case 12: /* D3 page flip */
+		case 14: /* D4 page flip */
+		case 16: /* D5 page flip */
+		case 18: /* D6 page flip */
+			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+			break;
+		case 42: /* HPD hotplug */
+			if (src_data <= 5) {
+				hpd_idx = src_data;
+				mask = DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				event_name = "HPD";
+
+			} else if (src_data <= 11) {
+				hpd_idx = src_data - 6;
+				mask = DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				event_name = "HPD_RX";
+
+			} else {
+				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+					  src_id, src_data);
+				break;
+			}
+
+			if (!(disp_int[hpd_idx] & mask))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			disp_int[hpd_idx] &= ~mask;
+			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
+
+			break;
+		case 44: /* hdmi */
+			afmt_idx = src_data;
+			if (afmt_idx > 5) {
+				DRM_ERROR("Unhandled interrupt: %d %d\n",
+					  src_id, src_data);
+				break;
+			}
+
+			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+			break;
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 146:
+		case 147:
+			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			if (addr == 0x0 && status == 0x0)
+				break;
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				addr);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				status);
+			cayman_vm_decode_fault(rdev, status, addr);
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			if (rdev->family >= CHIP_CAYMAN) {
+				switch (src_data) {
+				case 0:
+					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+					break;
+				case 1:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+					break;
+				case 2:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+					break;
+				}
+			} else
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA trap event */
+			if (rdev->family >= CHIP_CAYMAN) {
+				DRM_DEBUG("IH: DMA1 trap\n");
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			}
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
+	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
+	if (queue_thermal && rdev->pm.dpm_enabled)
+		schedule_work(&rdev->pm.dpm.thermal.work);
+	rdev->ih.rptr = rptr;
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+static void evergreen_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early, so nothing happens there.
+		 * It is therefore pointless to go through that code, which
+		 * is why we disable UVD here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void evergreen_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void evergreen_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
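+/**
+ * evergreen_startup - bring up the hardware
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Program the MC, GART and GPU, load microcode, start the rings and
+ * enable interrupts.  Called from both init and resume.
+ * Returns 0 for success, a negative error code on failure.
+ */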
+static int evergreen_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+	/* enable aspm */
+	evergreen_program_aspm(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	evergreen_mc_program(rdev);
+
+	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		evergreen_agp_enable(rdev);
+	} else {
+		r = evergreen_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	evergreen_gpu_init(rdev);
+
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size =
+			(u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
+		rdev->rlc.cs_data = evergreen_cs_data;
+		r = sumo_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	evergreen_uvd_start(rdev);
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+	if (r)
+		return r;
+
+	r = evergreen_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = evergreen_cp_resume(rdev);
+	if (r)
+		return r;
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	evergreen_uvd_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Reset the ASIC; the GFX blocks are often in a bad state
+	 * after the driver is unloaded or after a resume.
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed!\n");
+	/* Do not reset the GPU before posting; on rv770 hardware, unlike
+	 * r500, posting performs the tasks needed to bring the GPU back
+	 * into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	evergreen_init_golden_registers(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		DRM_ERROR("evergreen startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	evergreen_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use helper
+ * functions so that radeon_device_init() does little more than call
+ * asic-specific functions. This should also make it possible to remove
+ * a number of callbacks such as vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Reset the ASIC; the GFX blocks are often in a bad state
+	 * after the driver is unloaded or after a resume.
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed!\n");
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	evergreen_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	evergreen_uvd_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		evergreen_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing on BTC parts.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
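+/**
+ * evergreen_pcie_gen2_enable - enable PCIe gen 2 link speeds
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Switch the PCIe link to gen 2 speeds when the device, the upstream
+ * bridge and the module parameters allow it; IGP, non-PCIE and X2
+ * boards are skipped.
+ */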
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, speed_cntl;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
+
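+/**
+ * evergreen_program_aspm - configure ASPM (L0s/L1) behaviour
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Program the PIF/PHY pairing and the PCIe link control registers for
+ * power savings, with per-family quirks for parts where L0s must stay
+ * disabled.
+ */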
+void evergreen_program_aspm(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	u32 pcie_lc_cntl, pcie_lc_cntl_old;
+	bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
+	/* fusion_platform is true if the system is a fusion system
+	 * (an APU, or a dGPU in a fusion system).
+	 * TODO: check whether the system is actually a fusion platform.
+	 */
+	bool fusion_platform = false;
+
+	if (radeon_aspm == 0)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_PALM:
+	case CHIP_ARUBA:
+		disable_l0s = true;
+		break;
+	default:
+		disable_l0s = false;
+		break;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP)
+		fusion_platform = true; /* XXX also dGPUs in a fusion system */
+
+	data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
+	if (fusion_platform)
+		data &= ~MULTI_PIF;
+	else
+		data |= MULTI_PIF;
+	if (data != orig)
+		WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
+
+	data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
+	if (fusion_platform)
+		data &= ~MULTI_PIF;
+	else
+		data |= MULTI_PIF;
+	if (data != orig)
+		WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
+
+	pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	if (!disable_l0s) {
+		if (rdev->family >= CHIP_BARTS)
+			pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
+		else
+			pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
+	}
+
+	if (!disable_l1) {
+		if (rdev->family >= CHIP_BARTS)
+			pcie_lc_cntl |= LC_L1_INACTIVITY(7);
+		else
+			pcie_lc_cntl |= LC_L1_INACTIVITY(8);
+
+		if (!disable_plloff_in_l1) {
+			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (data != orig)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (data != orig)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (data != orig)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (data != orig)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+			if (rdev->family >= CHIP_BARTS) {
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				data |= PLL_RAMP_UP_TIME_0(4);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				data |= PLL_RAMP_UP_TIME_1(4);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				data |= PLL_RAMP_UP_TIME_0(4);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				data |= PLL_RAMP_UP_TIME_1(4);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+			}
+
+			data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (data != orig)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			if (rdev->family >= CHIP_BARTS) {
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
+				data &= ~LS2_EXIT_TIME_MASK;
+				data |= LS2_EXIT_TIME(1);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
+				data &= ~LS2_EXIT_TIME_MASK;
+				data |= LS2_EXIT_TIME(1);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
+			}
+		}
+	}
+
+	/* evergreen parts only */
+	if (rdev->family < CHIP_BARTS)
+		pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
+
+	if (pcie_lc_cntl != pcie_lc_cntl_old)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644
index 0000000..1a96ddb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
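+
+/*
+ * Each entry below is a PM4 type-3 SET_CONTEXT_REG packet: a header of
+ * the form 0xc0nn6900 (nn = payload dword count minus one), followed by
+ * the register offset relative to 0x28000 in dwords, then the register
+ * values.  For example, 0xc0016900 0x0000023b 0x00000000 writes 0 to
+ * the register at 0x28000 + 0x23b * 4 (SQ_LDS_ALLOC_PS).  Decoding
+ * shown for illustration; see the PM4 packet documentation.
+ */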
+
+const u32 evergreen_default_state[] =
+{
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x00000010,
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0106900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc00d6900,
+	0x00000202,
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x0000022a,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0016900,
+	0x00000187,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc00b6900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644
index 0000000..bb8d6c7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 0000000..5432433
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,3656 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
+
+#define MAX(a,b)                   (((a)>(b))?(a):(b))
+#define MIN(a,b)                   (((a)<(b))?(a):(b))
+
+#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
+
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_bo_list **cs_reloc);
+struct evergreen_cs_track {
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	u32			row_size;
+	/* values we track */
+	u32			nsamples;		/* unused */
+	struct radeon_bo	*cb_color_bo[12];
+	u32			cb_color_bo_offset[12];
+	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
+	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
+	u32			cb_color_info[12];
+	u32			cb_color_view[12];
+	u32			cb_color_pitch[12];
+	u32			cb_color_slice[12];
+	u32			cb_color_slice_idx[12];
+	u32			cb_color_attrib[12];
+	u32			cb_color_cmask_slice[8];/* unused */
+	u32			cb_color_fmask_slice[8];/* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask; /* unused */
+	u32			vgt_strmout_config;
+	u32			vgt_strmout_buffer_config;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_view;
+	u32			db_depth_slice;
+	u32			db_depth_size;
+	u32			db_z_info;
+	u32			db_z_read_offset;
+	u32			db_z_write_offset;
+	struct radeon_bo	*db_z_read_bo;
+	struct radeon_bo	*db_z_write_bo;
+	u32			db_s_info;
+	u32			db_s_read_offset;
+	u32			db_s_write_offset;
+	struct radeon_bo	*db_s_read_bo;
+	struct radeon_bo	*db_s_write_bo;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	u32			htile_offset;
+	u32			htile_surface;
+	struct radeon_bo	*htile_bo;
+	unsigned long		indirect_draw_buffer_size;
+	const unsigned		*reg_safe_bm;
+};
+
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+	if (tiling_flags & RADEON_TILING_MACRO)
+		return ARRAY_2D_TILED_THIN1;
+	else if (tiling_flags & RADEON_TILING_MICRO)
+		return ARRAY_1D_TILED_THIN1;
+	else
+		return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+	switch (nbanks) {
+	case 2:
+		return ADDR_SURF_2_BANK;
+	case 4:
+		return ADDR_SURF_4_BANK;
+	case 8:
+	default:
+		return ADDR_SURF_8_BANK;
+	case 16:
+		return ADDR_SURF_16_BANK;
+	}
+}
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		track->cb_color_fmask_bo[i] = NULL;
+		track->cb_color_cmask_bo[i] = NULL;
+		track->cb_color_cmask_slice[i] = 0;
+		track->cb_color_fmask_slice[i] = 0;
+	}
+
+	for (i = 0; i < 12; i++) {
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_pitch[i] = 0;
+		track->cb_color_slice[i] = 0xfffffff;
+		track->cb_color_slice_idx[i] = 0;
+	}
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+
+	track->db_depth_slice = 0xffffffff;
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_z_info = 0xFFFFFFFF;
+	track->db_z_read_offset = 0xFFFFFFFF;
+	track->db_z_write_offset = 0xFFFFFFFF;
+	track->db_z_read_bo = NULL;
+	track->db_z_write_bo = NULL;
+	track->db_s_info = 0xFFFFFFFF;
+	track->db_s_read_offset = 0xFFFFFFFF;
+	track->db_s_write_offset = 0xFFFFFFFF;
+	track->db_s_read_bo = NULL;
+	track->db_s_write_bo = NULL;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
+
+struct eg_surface {
+	/* value gathered from cs */
+	unsigned	nbx;
+	unsigned	nby;
+	unsigned	format;
+	unsigned	mode;
+	unsigned	nbanks;
+	unsigned	bankw;
+	unsigned	bankh;
+	unsigned	tsplit;
+	unsigned	mtilea;
+	unsigned	nsamples;
+	/* output value */
+	unsigned	bpe;
+	unsigned	layer_size;
+	unsigned	palign;
+	unsigned	halign;
+	unsigned long	base_align;
+};
+
+static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
+					  struct eg_surface *surf,
+					  const char *prefix)
+{
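+	/* linear-general surfaces have no tiling constraints: e.g. a 64x64
+	 * ARGB8 (bpe = 4) single-sample surface gives layer_size =
+	 * 64 * 64 * 4 = 16 KiB with only byte-per-element base alignment
+	 */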
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = surf->bpe;
+	surf->palign = 1;
+	surf->halign = 1;
+	return 0;
+}
+
+static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
+						  struct eg_surface *surf,
+						  const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = MAX(64, track->group_size / surf->bpe);
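+	/* e.g. with a 256-byte group size and bpe = 4, the pitch must be
+	 * aligned to MAX(64, 256 / 4) = 64 pixels */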
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 1;
+	if (surf->nbx & (palign - 1)) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+	palign = MAX(8, palign);
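+	/* one tile is 8x8 elements, so e.g. a 512-byte group with bpe = 4
+	 * and one sample gives palign = MAX(8, 512 / (8 * 4)) = 16 pixels,
+	 * while rows stay 8-aligned (halign below) */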
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 8;
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign,
+				 track->group_size, surf->bpe, surf->nsamples);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (8 - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
+				 __func__, __LINE__, prefix, surf->nby);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign, halign, tileb, slice_pt;
+	unsigned mtile_pr, mtile_ps, mtileb;
+
+	tileb = 64 * surf->bpe * surf->nsamples;
+	slice_pt = 1;
+	if (tileb > surf->tsplit) {
+		slice_pt = tileb / surf->tsplit;
+	}
+	tileb = tileb / slice_pt;
+	/* macro tile width & height */
+	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
+	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
+	mtileb = (palign / 8) * (halign / 8) * tileb;
+	mtile_pr = surf->nbx / palign;
+	mtile_ps = (mtile_pr * surf->nby) / halign;
+	surf->layer_size = mtile_ps * mtileb * slice_pt;
+	surf->base_align = (palign / 8) * (halign / 8) * tileb;
+	surf->palign = palign;
+	surf->halign = halign;
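+	/* e.g. npipes = 8, nbanks = 8, bankw = bankh = 1, mtilea = 2 gives
+	 * a 128x32 element macro tile: palign = 8 * 1 * 8 * 2 = 128 and
+	 * halign = (8 * 1 * 8) / 2 = 32 (illustrative values) */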
+
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (halign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nby, halign);
+		}
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int evergreen_surface_check(struct radeon_cs_parser *p,
+				   struct eg_surface *surf,
+				   const char *prefix)
+{
+	/* some common value computed here */
+	surf->bpe = r600_fmt_get_blocksize(surf->format);
+
+	switch (surf->mode) {
+	case ARRAY_LINEAR_GENERAL:
+		return evergreen_surface_check_linear(p, surf, prefix);
+	case ARRAY_LINEAR_ALIGNED:
+		return evergreen_surface_check_linear_aligned(p, surf, prefix);
+	case ARRAY_1D_TILED_THIN1:
+		return evergreen_surface_check_1d(p, surf, prefix);
+	case ARRAY_2D_TILED_THIN1:
+		return evergreen_surface_check_2d(p, surf, prefix);
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+	return -EINVAL;
+}
+
+static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
+					      struct eg_surface *surf,
+					      const char *prefix)
+{
+	switch (surf->mode) {
+	case ARRAY_2D_TILED_THIN1:
+		break;
+	case ARRAY_LINEAR_GENERAL:
+	case ARRAY_LINEAR_ALIGNED:
+	case ARRAY_1D_TILED_THIN1:
+		return 0;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+
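+	/* the fields below arrive as hardware encodings and are converted
+	 * to actual counts/sizes, e.g. a NUM_BANKS field of 2 means 8 banks
+	 * and a TILE_SPLIT field of 4 means a 1024-byte split */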
+	switch (surf->nbanks) {
+	case 0: surf->nbanks = 2; break;
+	case 1: surf->nbanks = 4; break;
+	case 2: surf->nbanks = 8; break;
+	case 3: surf->nbanks = 16; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
+			 __func__, __LINE__, prefix, surf->nbanks);
+		return -EINVAL;
+	}
+	switch (surf->bankw) {
+	case 0: surf->bankw = 1; break;
+	case 1: surf->bankw = 2; break;
+	case 2: surf->bankw = 4; break;
+	case 3: surf->bankw = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
+			 __func__, __LINE__, prefix, surf->bankw);
+		return -EINVAL;
+	}
+	switch (surf->bankh) {
+	case 0: surf->bankh = 1; break;
+	case 1: surf->bankh = 2; break;
+	case 2: surf->bankh = 4; break;
+	case 3: surf->bankh = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
+			 __func__, __LINE__, prefix, surf->bankh);
+		return -EINVAL;
+	}
+	switch (surf->mtilea) {
+	case 0: surf->mtilea = 1; break;
+	case 1: surf->mtilea = 2; break;
+	case 2: surf->mtilea = 4; break;
+	case 3: surf->mtilea = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
+			 __func__, __LINE__, prefix, surf->mtilea);
+		return -EINVAL;
+	}
+	switch (surf->tsplit) {
+	case 0: surf->tsplit = 64; break;
+	case 1: surf->tsplit = 128; break;
+	case 2: surf->tsplit = 256; break;
+	case 3: surf->tsplit = 512; break;
+	case 4: surf->tsplit = 1024; break;
+	case 5: surf->tsplit = 2048; break;
+	case 6: surf->tsplit = 4096; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
+			 __func__, __LINE__, prefix, surf->tsplit);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+	pitch = track->cb_color_pitch[id];
+	slice = track->cb_color_slice[id];
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
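+	/* pitch/slice registers hold tile-max style encodings: e.g. for a
+	 * 256x256 target, pitch = 256/8 - 1 = 31 so nbx = 256, and
+	 * slice = 256*256/64 - 1 = 1023 so nby = 256 */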
+	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
+	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
+	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
+	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
+	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
+	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
+	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
+	surf.nsamples = 1;
+
+	if (!r600_fmt_is_valid_color(surf.format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
+			 __func__, __LINE__, surf.format,
+			id, track->cb_color_info[id]);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "cb");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "cb");
+	if (r) {
+		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, id, track->cb_color_pitch[id],
+			 track->cb_color_slice[id], track->cb_color_attrib[id],
+			 track->cb_color_info[id]);
+		return r;
+	}
+
+	offset = track->cb_color_bo_offset[id] << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, id, offset, surf.base_align);
+		return -EINVAL;
+	}
+
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+		/* old ddx versions are broken: they allocate the bo with
+		 * w*h*bpp but program the slice with ALIGN(h, 8); catch
+		 * this and patch the command stream.
+		 */
+		if (!surf.mode) {
+			uint32_t *ib = p->ib.ptr;
+			unsigned long tmp, nby, bsize, size, min = 0;
+
+			/* find the height the ddx wants */
+			if (surf.nby > 8) {
+				min = surf.nby - 8;
+			}
+			bsize = radeon_bo_size(track->cb_color_bo[id]);
+			tmp = track->cb_color_bo_offset[id] << 8;
+			for (nby = surf.nby; nby > min; nby--) {
+				size = nby * surf.nbx * surf.bpe * surf.nsamples;
+				if ((tmp + size * mslice) <= bsize) {
+					break;
+				}
+			}
+			if (nby > min) {
+				surf.nby = nby;
+				slice = ((nby * surf.nbx) / 64) - 1;
+				if (!evergreen_surface_check(p, &surf, "cb")) {
+					/* check if this one works */
+					tmp += surf.layer_size * mslice;
+					if (tmp <= bsize) {
+						ib[track->cb_color_slice_idx[id]] = slice;
+						goto old_ddx_ok;
+					}
+				}
+			}
+		}
+		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
+			 __func__, __LINE__, id, surf.layer_size,
+			track->cb_color_bo_offset[id] << 8, mslice,
+			radeon_bo_size(track->cb_color_bo[id]), slice);
+		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+			 __func__, __LINE__, surf.nbx, surf.nby,
+			surf.mode, surf.bpe, surf.nsamples,
+			surf.bankw, surf.bankh,
+			surf.tsplit, surf.mtilea);
+		return -EINVAL;
+	}
+old_ddx_ok:
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+						unsigned nbx, unsigned nby)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned long size;
+
+	if (track->htile_bo == NULL) {
+		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				__func__, __LINE__, track->db_z_info);
+		return -EINVAL;
+	}
+
+	if (G_028ABC_LINEAR(track->htile_surface)) {
+		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+		nbx = round_up(nbx, 16 * 8);
+		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
+		nby = round_up(nby, track->npipes * 8);
+	} else {
+		/* always assume 8x8 htile */
+		/* alignment is htile align * 8; htile align varies with the
+		 * number of pipes and with the tile width and nby
+		 */
+		switch (track->npipes) {
+		case 8:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 64 * 8);
+			break;
+		case 4:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 32 * 8);
+			break;
+		case 2:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 32 * 8);
+			break;
+		case 1:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 16 * 8);
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					__func__, __LINE__, track->npipes);
+			return -EINVAL;
+		}
+	}
+	/* compute number of htiles */
+	nbx = nbx >> 3;
+	nby = nby >> 3;
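+	/* each htile covers an 8x8 pixel block and takes 4 bytes, so e.g. a
+	 * padded 1024x1024 depth buffer needs 128 * 128 * 4 = 64 KiB of
+	 * htile data before the npipes * 2K rounding below */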
+	/* size must be aligned on npipes * 2K boundary */
+	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+	size += track->htile_offset;
+
+	if (size > radeon_bo_size(track->htile_bo)) {
+		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				__func__, __LINE__, radeon_bo_size(track->htile_bo),
+				size, nbx, nby);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028044_FORMAT(track->db_s_info);
+	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	if (surf.format != 1) {
+		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	/* replace with a color format so we can reuse the same code */
+	surf.format = V_028C70_COLOR_8;
+
+	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, NULL);
+	if (r) {
+		/* old userspace doesn't compute a proper depth/stencil
+		 * alignment; re-check against a larger bytes-per-element
+		 * format and only report an error if that alignment is
+		 * wrong too.
+		 */
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		r = evergreen_surface_check(p, &surf, "stencil");
+		if (r) {
+			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+				 __func__, __LINE__, track->db_depth_size,
+				 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		}
+		return r;
+	}
+
+	offset = track->db_s_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_read_bo)) {
+		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_read_offset << 8, mslice,
+			radeon_bo_size(track->db_s_read_bo));
+		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		return -EINVAL;
+	}
+
+	offset = track->db_s_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_write_bo)) {
+		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_write_offset << 8, mslice,
+			radeon_bo_size(track->db_s_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028040_FORMAT(track->db_z_info);
+	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	switch (surf.format) {
+	case V_028040_Z_16:
+		surf.format = V_028C70_COLOR_16;
+		break;
+	case V_028040_Z_24:
+	case V_028040_Z_32_FLOAT:
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	offset = track->db_z_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_read_bo)) {
+		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_read_offset << 8, mslice,
+			radeon_bo_size(track->db_z_read_bo));
+		return -EINVAL;
+	}
+
+	offset = track->db_z_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_write_bo)) {
+		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_write_offset << 8, mslice,
+			radeon_bo_size(track->db_z_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
+					       struct radeon_bo *texture,
+					       struct radeon_bo *mipmap,
+					       unsigned idx)
+{
+	struct eg_surface surf;
+	unsigned long toffset, moffset;
+	unsigned dim, llevel, mslice, width, height, depth, i;
+	u32 texdw[8];
+	int r;
+
+	texdw[0] = radeon_get_ib_value(p, idx + 0);
+	texdw[1] = radeon_get_ib_value(p, idx + 1);
+	texdw[2] = radeon_get_ib_value(p, idx + 2);
+	texdw[3] = radeon_get_ib_value(p, idx + 3);
+	texdw[4] = radeon_get_ib_value(p, idx + 4);
+	texdw[5] = radeon_get_ib_value(p, idx + 5);
+	texdw[6] = radeon_get_ib_value(p, idx + 6);
+	texdw[7] = radeon_get_ib_value(p, idx + 7);
+	dim = G_030000_DIM(texdw[0]);
+	llevel = G_030014_LAST_LEVEL(texdw[5]);
+	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
+	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
+	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
+	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
+	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
+	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
+	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
+	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
+	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
+	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
+	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
+	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
+	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
+	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
+	surf.nsamples = 1;
+	toffset = texdw[2] << 8;
+	moffset = texdw[3] << 8;
+
+	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	switch (dim) {
+	case V_030000_SQ_TEX_DIM_1D:
+	case V_030000_SQ_TEX_DIM_2D:
+	case V_030000_SQ_TEX_DIM_CUBEMAP:
+	case V_030000_SQ_TEX_DIM_1D_ARRAY:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY:
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_2D_MSAA:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
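+		/* for MSAA resources, LAST_LEVEL encodes log2(sample count)
+		 * rather than a mipmap level */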
+		surf.nsamples = 1 << llevel;
+		llevel = 0;
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_3D:
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
+			 __func__, __LINE__, dim);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "texture");
+	if (r) {
+		return r;
+	}
+
+	/* align height */
+	evergreen_surface_check(p, &surf, NULL);
+	surf.nby = ALIGN(surf.nby, surf.halign);
+
+	r = evergreen_surface_check(p, &surf, "texture");
+	if (r) {
+		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
+			 texdw[5], texdw[6], texdw[7]);
+		return r;
+	}
+
+	/* check texture size */
+	if (toffset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, toffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, moffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (dim == SQ_TEX_DIM_3D) {
+		toffset += surf.layer_size * depth;
+	} else {
+		toffset += surf.layer_size * mslice;
+	}
+	if (toffset > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
+			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)texdw[2] << 8, mslice,
+			depth, radeon_bo_size(texture),
+			surf.nbx, surf.nby);
+		return -EINVAL;
+	}
+
+	if (!mipmap) {
+		if (llevel) {
+			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+				 __func__, __LINE__);
+			return -EINVAL;
+		} else {
+			return 0; /* everything's ok */
+		}
+	}
+
+	/* check mipmap size */
+	for (i = 1; i <= llevel; i++) {
+		unsigned w, h, d;
+
+		w = r600_mip_minify(width, i);
+		h = r600_mip_minify(height, i);
+		d = r600_mip_minify(depth, i);
+		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
+		surf.nby = r600_fmt_get_nblocksy(surf.format, h);
+
+		switch (surf.mode) {
+		case ARRAY_2D_TILED_THIN1:
+			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
+				surf.mode = ARRAY_1D_TILED_THIN1;
+			}
+			/* recompute alignment */
+			evergreen_surface_check(p, &surf, NULL);
+			break;
+		case ARRAY_LINEAR_GENERAL:
+		case ARRAY_LINEAR_ALIGNED:
+		case ARRAY_1D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
+				 __func__, __LINE__, surf.mode);
+			return -EINVAL;
+		}
+		surf.nbx = ALIGN(surf.nbx, surf.palign);
+		surf.nby = ALIGN(surf.nby, surf.halign);
+
+		r = evergreen_surface_check(p, &surf, "mipmap");
+		if (r) {
+			return r;
+		}
+
+		if (dim == SQ_TEX_DIM_3D) {
+			moffset += surf.layer_size * d;
+		} else {
+			moffset += surf.layer_size * mslice;
+		}
+		if (moffset > radeon_bo_size(mipmap)) {
+			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
+					"offset %ld, coffset %ld, max layer %d, depth %d, "
+					"bo size %ld) level0 (%d %d %d)\n",
+					__func__, __LINE__, i, surf.layer_size,
+					(unsigned long)texdw[3] << 8, moffset, mslice,
+					d, radeon_bo_size(mipmap),
+					width, height, depth);
+			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+				 __func__, __LINE__, surf.nbx, surf.nby,
+				surf.mode, surf.bpe, surf.nsamples,
+				surf.bankw, surf.bankh,
+				surf.tsplit, surf.mtilea);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned tmp, i;
+	int r;
+	unsigned buffer_mask = 0;
+
+	/* check streamout */
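+	/* each stream enabled in vgt_strmout_config owns a 4-bit nibble in
+	 * vgt_strmout_buffer_config selecting which of the 4 buffers it
+	 * writes; OR the nibbles together to get the set of used buffers */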
+	if (track->streamout_dirty && track->vgt_strmout_config) {
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_config & (1 << i)) {
+				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
+			}
+		}
+
+		for (i = 0; i < 4; i++) {
+			if (buffer_mask & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+							(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+							  i, offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+		for (i = 0; i < 8; i++) {
+			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_028C70_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* check cb */
+				r = evergreen_cs_track_validate_cb(p, i);
+				if (r) {
+					return r;
+				}
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	if (track->db_dirty) {
+		/* Check stencil buffer */
+		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_stencil(p);
+			if (r)
+				return r;
+		}
+		/* Check depth buffer */
+		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+		    G_028800_Z_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_depth(p);
+			if (r)
+				return r;
+		}
+		track->db_dirty = false;
+	}
+
+	return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * The real work is done by r600_cs_common_vline_parse(); here we just
+ * set up the ASIC-specific register tables and call the common
+ * implementation.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	static uint32_t vline_start_end[6] = {
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+	};
+	static uint32_t vline_status[6] = {
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+	};
+
+	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt,
+				   unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case EVERGREEN_VLINE_START_END:
+		r = evergreen_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = evergreen_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * evergreen_cs_handle_reg() - process registers that need special handling.
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ */
+static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+	struct radeon_bo_list *reloc;
+	u32 tmp, *ib;
+	int r;
+
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; performing a proper security check on it would require a
+	 * better understanding of how it works (Jerome)
+	 */
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+		/* get the value to populate the IB, don't remove */
+		/* tmp = radeon_get_ib_value(p, idx);
+		   ib[idx] = 0; */
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case CAYMAN_DB_EQAA:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_DB_DEPTH_INFO:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case DB_Z_INFO:
+		track->db_z_info = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] &= ~Z_ARRAY_MODE(0xf);
+			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= DB_TILE_SPLIT(tile_split) |
+						DB_BANK_WIDTH(bankw) |
+						DB_BANK_HEIGHT(bankh) |
+						DB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_INFO:
+		track->db_s_info = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_02805C_DB_DEPTH_SLICE:
+		track->db_depth_slice = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_Z_READ_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->db_z_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_Z_WRITE_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->db_z_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_READ_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->db_s_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_WRITE_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->db_s_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case VGT_STRMOUT_CONFIG:
+		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_CONFIG:
+		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
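+		/* the four BUFFER_BASE registers are 16 bytes apart, so this
+		 * recovers the stream buffer index 0-3 */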
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case PA_SC_AA_CONFIG:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CAYMAN_PA_SC_AA_CONFIG:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CB_COLOR0_VIEW:
+	case CB_COLOR1_VIEW:
+	case CB_COLOR2_VIEW:
+	case CB_COLOR3_VIEW:
+	case CB_COLOR4_VIEW:
+	case CB_COLOR5_VIEW:
+	case CB_COLOR6_VIEW:
+	case CB_COLOR7_VIEW:
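+		/* CB0-CB7 register blocks are 0x3c bytes apart (CB8-CB11
+		 * use a smaller 0x1c stride, handled below) */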
+		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_VIEW:
+	case CB_COLOR9_VIEW:
+	case CB_COLOR10_VIEW:
+	case CB_COLOR11_VIEW:
+		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_INFO:
+	case CB_COLOR1_INFO:
+	case CB_COLOR2_INFO:
+	case CB_COLOR3_INFO:
+	case CB_COLOR4_INFO:
+	case CB_COLOR5_INFO:
+	case CB_COLOR6_INFO:
+	case CB_COLOR7_INFO:
+		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_INFO:
+	case CB_COLOR9_INFO:
+	case CB_COLOR10_INFO:
+	case CB_COLOR11_INFO:
+		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_PITCH:
+	case CB_COLOR1_PITCH:
+	case CB_COLOR2_PITCH:
+	case CB_COLOR3_PITCH:
+	case CB_COLOR4_PITCH:
+	case CB_COLOR5_PITCH:
+	case CB_COLOR6_PITCH:
+	case CB_COLOR7_PITCH:
+		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_PITCH:
+	case CB_COLOR9_PITCH:
+	case CB_COLOR10_PITCH:
+	case CB_COLOR11_PITCH:
+		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_SLICE:
+	case CB_COLOR1_SLICE:
+	case CB_COLOR2_SLICE:
+	case CB_COLOR3_SLICE:
+	case CB_COLOR4_SLICE:
+	case CB_COLOR5_SLICE:
+	case CB_COLOR6_SLICE:
+	case CB_COLOR7_SLICE:
+		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_SLICE:
+	case CB_COLOR9_SLICE:
+	case CB_COLOR10_SLICE:
+	case CB_COLOR11_SLICE:
+		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_ATTRIB:
+	case CB_COLOR1_ATTRIB:
+	case CB_COLOR2_ATTRIB:
+	case CB_COLOR3_ATTRIB:
+	case CB_COLOR4_ATTRIB:
+	case CB_COLOR5_ATTRIB:
+	case CB_COLOR6_ATTRIB:
+	case CB_COLOR7_ATTRIB:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_ATTRIB:
+	case CB_COLOR9_ATTRIB:
+	case CB_COLOR10_ATTRIB:
+	case CB_COLOR11_ATTRIB:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_FMASK:
+	case CB_COLOR1_FMASK:
+	case CB_COLOR2_FMASK:
+	case CB_COLOR3_FMASK:
+	case CB_COLOR4_FMASK:
+	case CB_COLOR5_FMASK:
+	case CB_COLOR6_FMASK:
+	case CB_COLOR7_FMASK:
+		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_fmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_CMASK:
+	case CB_COLOR1_CMASK:
+	case CB_COLOR2_CMASK:
+	case CB_COLOR3_CMASK:
+	case CB_COLOR4_CMASK:
+	case CB_COLOR5_CMASK:
+	case CB_COLOR6_CMASK:
+	case CB_COLOR7_CMASK:
+		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_cmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_FMASK_SLICE:
+	case CB_COLOR1_FMASK_SLICE:
+	case CB_COLOR2_FMASK_SLICE:
+	case CB_COLOR3_FMASK_SLICE:
+	case CB_COLOR4_FMASK_SLICE:
+	case CB_COLOR5_FMASK_SLICE:
+	case CB_COLOR6_FMASK_SLICE:
+	case CB_COLOR7_FMASK_SLICE:
+		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_CMASK_SLICE:
+	case CB_COLOR1_CMASK_SLICE:
+	case CB_COLOR2_CMASK_SLICE:
+	case CB_COLOR3_CMASK_SLICE:
+	case CB_COLOR4_CMASK_SLICE:
+	case CB_COLOR5_CMASK_SLICE:
+	case CB_COLOR6_CMASK_SLICE:
+	case CB_COLOR7_CMASK_SLICE:
+		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_BASE:
+	case CB_COLOR9_BASE:
+	case CB_COLOR10_BASE:
+	case CB_COLOR11_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		/* 8x8 only */
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case CB_IMMED0_BASE:
+	case CB_IMMED1_BASE:
+	case CB_IMMED2_BASE:
+	case CB_IMMED3_BASE:
+	case CB_IMMED4_BASE:
+	case CB_IMMED5_BASE:
+	case CB_IMMED6_BASE:
+	case CB_IMMED7_BASE:
+	case CB_IMMED8_BASE:
+	case CB_IMMED9_BASE:
+	case CB_IMMED10_BASE:
+	case CB_IMMED11_BASE:
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_PGM_START_HS:
+	case SQ_PGM_START_LS:
+	case SQ_CONST_MEM_BASE:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+	case SQ_ALU_CONST_CACHE_HS_0:
+	case SQ_ALU_CONST_CACHE_HS_1:
+	case SQ_ALU_CONST_CACHE_HS_2:
+	case SQ_ALU_CONST_CACHE_HS_3:
+	case SQ_ALU_CONST_CACHE_HS_4:
+	case SQ_ALU_CONST_CACHE_HS_5:
+	case SQ_ALU_CONST_CACHE_HS_6:
+	case SQ_ALU_CONST_CACHE_HS_7:
+	case SQ_ALU_CONST_CACHE_HS_8:
+	case SQ_ALU_CONST_CACHE_HS_9:
+	case SQ_ALU_CONST_CACHE_HS_10:
+	case SQ_ALU_CONST_CACHE_HS_11:
+	case SQ_ALU_CONST_CACHE_HS_12:
+	case SQ_ALU_CONST_CACHE_HS_13:
+	case SQ_ALU_CONST_CACHE_HS_14:
+	case SQ_ALU_CONST_CACHE_HS_15:
+	case SQ_ALU_CONST_CACHE_LS_0:
+	case SQ_ALU_CONST_CACHE_LS_1:
+	case SQ_ALU_CONST_CACHE_LS_2:
+	case SQ_ALU_CONST_CACHE_LS_3:
+	case SQ_ALU_CONST_CACHE_LS_4:
+	case SQ_ALU_CONST_CACHE_LS_5:
+	case SQ_ALU_CONST_CACHE_LS_6:
+	case SQ_ALU_CONST_CACHE_LS_7:
+	case SQ_ALU_CONST_CACHE_LS_8:
+	case SQ_ALU_CONST_CACHE_LS_9:
+	case SQ_ALU_CONST_CACHE_LS_10:
+	case SQ_ALU_CONST_CACHE_LS_11:
+	case SQ_ALU_CONST_CACHE_LS_12:
+	case SQ_ALU_CONST_CACHE_LS_13:
+	case SQ_ALU_CONST_CACHE_LS_14:
+	case SQ_ALU_CONST_CACHE_LS_15:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CAYMAN_SX_SCATTER_EXPORT_BASE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * evergreen_is_safe_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ *
+ * Tests @reg against the reg_safe_bm bitmap and returns true if the
+ * register is safe to write from userspace, false otherwise.
+ */
+static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
+{
+	struct evergreen_cs_track *track = p->track;
+	u32 m, i;
+
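+	/* reg_safe_bm has one bit per register dword: reg >> 7 selects the
+	 * 32-bit word covering a 128-byte window of register space and
+	 * (reg >> 2) & 31 selects the bit within it; a set bit marks a
+	 * register that is forbidden or needs special handling */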
+	i = (reg >> 7);
+	if (unlikely(i >= REG_SAFE_BM_SIZE)) {
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(track->reg_safe_bm[i] & m))
+		return true;
+
+	return false;
+}
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt)
+{
+	struct radeon_bo_list *reloc;
+	struct evergreen_cs_track *track;
+	uint32_t *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct evergreen_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
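+		/* assemble the 40-bit GPU address: the low dword keeps its
+		 * 16-byte-aligned low bits, the upper 8 address bits live in
+		 * the low byte of the next dword */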
+		offset = reloc->gpu_offset +
+			 (idx_value & 0xfffffff0) +
+			 ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_CLEAR_STATE:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_BASE:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_INDEX_BUFFER_SIZE:
+	{
+		if (pkt->count != 0) {
+			DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
+			return -EINVAL;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_2:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 radeon_get_ib_value(p, idx+1) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset;
+		ib[idx+2] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_SET_BASE:
+	{
+		/*
+		DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet.
+		   2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs.
+		     0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data.
+		   3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved
+		   4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
+		*/
+		if (pkt->count != 2) {
+			DRM_ERROR("bad SET_BASE\n");
+			return -EINVAL;
+		}
+
+		/* currently only supporting setting indirect draw buffer base address */
+		if (idx_value != 1) {
+			DRM_ERROR("bad SET_BASE\n");
+			return -EINVAL;
+		}
+
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad SET_BASE\n");
+			return -EINVAL;
+		}
+
+		track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);
+
+		ib[idx+1] = reloc->gpu_offset;
+		ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
+
+		break;
+	}
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	{
+		u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
+
+		/*
+		DW 1 HEADER
+		   2 DATA_OFFSET Bits [31:0] + byte aligned offset where the required data structure starts. Bits 1:0 are zero
+		   3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
+		*/
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDIRECT\n");
+			return -EINVAL;
+		}
+
+		if (idx_value + size > track->indirect_draw_buffer_size) {
+			dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
+				idx_value, size, track->indirect_draw_buffer_size);
+			return -EINVAL;
+		}
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DISPATCH_DIRECT:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DISPATCH_DIRECT\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DISPATCH_INDIRECT:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->gpu_offset +
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else if (idx_value & 0x100) {
+			DRM_ERROR("cannot use PFP on REG wait\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size, info;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		info = radeon_get_ib_value(p, idx+1);
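+		/*
+		 * DW1 carries SRC_SEL in bits [30:29] (0 = memory, 1 = GDS,
+		 * 2 = inline data) and DST_SEL in bits [21:20] (0 = memory,
+		 * 1 = GDS); the last dword holds the command flags with the
+		 * byte count in bits [20:0].
+		 */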
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem to mem copies require dw aligned count */
+			if (size % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			/* GDS is ok */
+			if (((info & 0x60000000) >> 29) != 1) {
+				DRM_ERROR("CP DMA SAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			if (((info & 0x60000000) >> 29) == 0) {
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad CP DMA SRC\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx) +
+					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+				offset = reloc->gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx] = offset;
+				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+			} else if (((info & 0x60000000) >> 29) != 2) {
+				DRM_ERROR("bad CP DMA SRC_SEL\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			/* GDS is ok */
+			if (((info & 0x00300000) >> 20) != 1) {
+				DRM_ERROR("CP DMA DAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			if (((info & 0x00300000) >> 20) == 0) {
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad CP DMA DST\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx+2) +
+					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+				offset = reloc->gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx+2] = offset;
+				ib[idx+3] = upper_32_bits(offset) & 0xff;
+			} else {
+				DRM_ERROR("bad CP DMA DST_SEL\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	}
+	case PACKET3_PFP_SYNC_ME:
+		if (pkt->count) {
+			DRM_ERROR("bad PFP_SYNC_ME\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->gpu_offset +
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_EVENT_WRITE_EOS:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+			if (evergreen_is_safe_reg(p, reg))
+				continue;
+			r = evergreen_cs_handle_reg(p, reg, idx);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+			if (evergreen_is_safe_reg(p, reg))
+				continue;
+			r = evergreen_cs_handle_reg(p, reg, idx);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 8) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
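+		/*
+		 * Each resource descriptor is 8 dwords; dword 7 carries the
+		 * SQ constant type, which tells whether the slot holds a
+		 * texture, a vertex buffer or nothing valid.
+		 */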
+		for (i = 0; i < (pkt->count / 8); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 toffset, moffset;
+			u32 size, offset, mip_address, tex_dim;
+
+			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (tex)\n");
+					return -EINVAL;
+				}
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					ib[idx+1+(i*8)+1] |=
+						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+					if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+						unsigned bankw, bankh, mtaspect, tile_split;
+
+						evergreen_tiling_fields(reloc->tiling_flags,
+									&bankw, &bankh, &mtaspect,
+									&tile_split);
+						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
+						ib[idx+1+(i*8)+7] |=
+							TEX_BANK_WIDTH(bankw) |
+							TEX_BANK_HEIGHT(bankh) |
+							MACRO_TILE_ASPECT(mtaspect) |
+							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+					}
+				}
+				texture = reloc->robj;
+				toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+
+				/* tex mip base */
+				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+				mip_address = ib[idx+1+(i*8)+3];
+
+				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+				    !mip_address &&
+				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
+					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
+					 * It should be 0 if FMASK is disabled. */
+					moffset = 0;
+					mipmap = NULL;
+				} else {
+					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+					if (r) {
+						DRM_ERROR("bad SET_RESOURCE (tex)\n");
+						return -EINVAL;
+					}
+					moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+					mipmap = reloc->robj;
+				}
+
+				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+				if (r)
+					return r;
+				ib[idx+1+(i*8)+2] += toffset;
+				ib[idx+1+(i*8)+3] += moffset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->gpu_offset + offset;
+				ib[idx+1+(i*8)+0] = offset64;
+				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_ALU_CONST:
+		/* XXX fix me ALU const buffers only */
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
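+		/*
+		 * Bit 0 of the control word selects a memory destination and
+		 * a source select (bits [2:1]) of 2 a memory source; both
+		 * need their 40-bit addresses relocated and bounds checked.
+		 */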
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+				  offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!evergreen_is_safe_reg(p, reg)) {
+				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+					 reg, idx + 1);
+				return -EINVAL;
+			}
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!evergreen_is_safe_reg(p, reg)) {
+				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+					 reg, idx + 3);
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_APPEND_CNT:
+	{
+		uint32_t areg;
+		uint32_t allowed_reg_base;
+		uint32_t source_sel;
+		if (pkt->count != 2) {
+			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+			return -EINVAL;
+		}
+
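+		/*
+		 * The packet addresses context registers as dword offsets
+		 * from PACKET3_SET_CONTEXT_REG_START; only the twelve
+		 * GDS_APPEND_COUNT_* registers are acceptable here.
+		 */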
+		allowed_reg_base = GDS_APPEND_COUNT_0;
+		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+		allowed_reg_base >>= 2;
+
+		areg = idx_value >> 16;
+		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+			dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+				 areg, idx);
+			return -EINVAL;
+		}
+
+		source_sel = G_PACKET3_SET_APPEND_CNT_SRC_SELECT(idx_value);
+		if (source_sel == PACKET3_SAC_SRC_SEL_MEM) {
+			uint64_t offset;
+			uint32_t swap;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx + 1);
+			swap = offset & 0x3;
+			offset &= ~0x3;
+
+			offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+
+			offset += reloc->gpu_offset;
+			ib[idx+1] = (offset & 0xfffffffc) | swap;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+			return -EINVAL;
+		}
+		break;
+	}
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct evergreen_cs_track *track;
+	u32 tmp;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize the tracker; we are in KMS */
+		track = kzalloc(sizeof(*track), GFP_KERNEL);
+		if (track == NULL)
+			return -ENOMEM;
+		evergreen_cs_track_init(track);
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			tmp = p->rdev->config.cayman.tile_config;
+			track->reg_safe_bm = cayman_reg_safe_bm;
+		} else {
+			tmp = p->rdev->config.evergreen.tile_config;
+			track->reg_safe_bm = evergreen_reg_safe_bm;
+		}
+		BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
+		BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
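+		/*
+		 * tile_config packs the chip topology: bits [3:0] pipe
+		 * count, [7:4] bank count, [11:8] group size and [15:12]
+		 * row size, decoded by the switches below.
+		 */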
+		switch (tmp & 0xf) {
+		case 0:
+			track->npipes = 1;
+			break;
+		case 1:
+		default:
+			track->npipes = 2;
+			break;
+		case 2:
+			track->npipes = 4;
+			break;
+		case 3:
+			track->npipes = 8;
+			break;
+		}
+
+		switch ((tmp & 0xf0) >> 4) {
+		case 0:
+			track->nbanks = 4;
+			break;
+		case 1:
+		default:
+			track->nbanks = 8;
+			break;
+		case 2:
+			track->nbanks = 16;
+			break;
+		}
+
+		switch ((tmp & 0xf00) >> 8) {
+		case 0:
+			track->group_size = 256;
+			break;
+		case 1:
+		default:
+			track->group_size = 512;
+			break;
+		}
+
+		switch ((tmp & 0xf000) >> 12) {
+		case 0:
+			track->row_size = 1;
+			break;
+		case 1:
+		default:
+			track->row_size = 2;
+			break;
+		case 2:
+			track->row_size = 4;
+			break;
+		}
+
+		p->track = track;
+	}
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = evergreen_cs_parse_packet0(p, &pkt);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = evergreen_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			kfree(p->track);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	kfree(p->track);
+	p->track = NULL;
+	return 0;
+}
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl, updates the GPU
+ * addresses based on the reloc information and checks
+ * for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+	struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
+	u32 header, cmd, count, sub_cmd;
+	uint32_t *ib = p->ib.ptr;
+	u32 idx;
+	u64 src_offset, dst_offset, dst2_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Cannot parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
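+		/*
+		 * A DMA packet header encodes the command in bits [31:28],
+		 * the sub-command in bits [27:20] and the dword count in
+		 * bits [19:0].
+		 */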
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				p->idx += count + 7;
+				break;
+			/* linear */
+			case 0:
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				p->idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
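+			/*
+			 * The sub-command selects the copy variant (linear,
+			 * tiled, byte aligned, partial, broadcast); each
+			 * variant has a fixed packet length, so patch the
+			 * addresses and skip ahead by that length.
+			 */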
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				/* L2L, dw */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx + 7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				/* L2L, byte */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+							src_offset + count, radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+							dst_offset + count, radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+				ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+				ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				/* L2L, partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
+				ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
+				ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+
+				p->idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				/* L2L, dw, broadcast */
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+3);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
+				ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				/* L2T, T2L partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+
+					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				}
+				p->idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				/* L2T, T2L */
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				/* T2T partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("T2T Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+				ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
+				p->idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
+
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case WAIT_UNTIL:
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case CP_COHER_CNTL:
+	case CP_COHER_SIZE:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_COMPUTE_DIM_X:
+	case VGT_COMPUTE_DIM_Y:
+	case VGT_COMPUTE_DIM_Z:
+	case VGT_COMPUTE_START_X:
+	case VGT_COMPUTE_START_Y:
+	case VGT_COMPUTE_START_Z:
+	case VGT_COMPUTE_INDEX:
+	case VGT_COMPUTE_THREAD_GROUP_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+	case SQ_DYN_GPR_SIMD_LOCK_EN:
+	case SQ_CONFIG:
+	case SQ_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+	case SQ_CONST_MEM_BASE:
+	case SQ_STATIC_THREAD_MGMT_1:
+	case SQ_STATIC_THREAD_MGMT_2:
+	case SQ_STATIC_THREAD_MGMT_3:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+	case DB_DEBUG:
+	case DB_DEBUG2:
+	case DB_DEBUG3:
+	case DB_DEBUG4:
+	case DB_WATERMARKS:
+	case TD_PS_BORDER_COLOR_INDEX:
+	case TD_PS_BORDER_COLOR_RED:
+	case TD_PS_BORDER_COLOR_GREEN:
+	case TD_PS_BORDER_COLOR_BLUE:
+	case TD_PS_BORDER_COLOR_ALPHA:
+	case TD_VS_BORDER_COLOR_INDEX:
+	case TD_VS_BORDER_COLOR_RED:
+	case TD_VS_BORDER_COLOR_GREEN:
+	case TD_VS_BORDER_COLOR_BLUE:
+	case TD_VS_BORDER_COLOR_ALPHA:
+	case TD_GS_BORDER_COLOR_INDEX:
+	case TD_GS_BORDER_COLOR_RED:
+	case TD_GS_BORDER_COLOR_GREEN:
+	case TD_GS_BORDER_COLOR_BLUE:
+	case TD_GS_BORDER_COLOR_ALPHA:
+	case TD_HS_BORDER_COLOR_INDEX:
+	case TD_HS_BORDER_COLOR_RED:
+	case TD_HS_BORDER_COLOR_GREEN:
+	case TD_HS_BORDER_COLOR_BLUE:
+	case TD_HS_BORDER_COLOR_ALPHA:
+	case TD_LS_BORDER_COLOR_INDEX:
+	case TD_LS_BORDER_COLOR_RED:
+	case TD_LS_BORDER_COLOR_GREEN:
+	case TD_LS_BORDER_COLOR_BLUE:
+	case TD_LS_BORDER_COLOR_ALPHA:
+	case TD_CS_BORDER_COLOR_INDEX:
+	case TD_CS_BORDER_COLOR_RED:
+	case TD_CS_BORDER_COLOR_GREEN:
+	case TD_CS_BORDER_COLOR_BLUE:
+	case TD_CS_BORDER_COLOR_ALPHA:
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+				      u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+		break;
+	case PACKET3_SET_BASE:
+		if (idx_value != 1) {
+			DRM_ERROR("bad SET_BASE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_MODE_CONTROL:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_DRAW_INDEX_OFFSET:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDEX:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_BOOL_CONST:
+	case PACKET3_SET_LOOP_CONST:
+	case PACKET3_SET_RESOURCE:
+	case PACKET3_SET_SAMPLER:
+	case PACKET3_SET_CTL_CONST:
+	case PACKET3_SET_RESOURCE_OFFSET:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_RESOURCE_INDIRECT:
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem to mem copies require dw aligned count */
+			if ((command & 0x1fffff) % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
+	case PACKET3_SET_APPEND_CNT: {
+		uint32_t areg;
+		uint32_t allowed_reg_base;
+
+		if (pkt->count != 2) {
+			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+			return -EINVAL;
+		}
+
+		allowed_reg_base = GDS_APPEND_COUNT_0;
+		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+		allowed_reg_base >>= 2;
+
+		areg = idx_value >> 16;
+		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+			DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
+				  areg, idx);
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
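+	/*
+	 * VM IBs execute in a per-process GPU address space, so buffer
+	 * addresses need no relocation here; only access to privileged
+	 * registers has to be validated.
+	 */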
+	do {
+		pkt.idx = idx;
+		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case RADEON_PACKET_TYPE2:
+			idx += 1;
+			break;
+		case RADEON_PACKET_TYPE3:
+			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:	radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	u32 idx = 0;
+	u32 header, cmd, count, sub_cmd;
+
+	do {
+		header = ib->ptr[idx];
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
+				idx += count + 7;
+				break;
+			/* linear */
+			case 0:
+				idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (idx < ib->length_dw);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 0000000..96535aa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
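+	/*
+	 * With writeback enabled, emit a DMA WRITE that stores the ring
+	 * position expected once this IB packet has been fetched, so the
+	 * driver can track ring progress without a register read.
+	 */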
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
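+	 * Worked example (illustrative): the INDIRECT_BUFFER packet below is
+	 * 3 DW, so starting it at (wptr & 7) == 5 makes it end on an 8 DW
+	 * boundary.  The next_rptr computation above mirrors this: wptr + 4
+	 * covers the rptr write packet, the alignment loop covers the NOP
+	 * padding, and the final + 3 covers the IB packet itself.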
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+					uint64_t src_offset,
+					uint64_t dst_offset,
+					unsigned num_gpu_pages,
+					struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
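+	/* Ring space sketch: each copy packet below is 5 DW and moves at most
+	 * 0xfffff DW, so e.g. one 4 KiB GPU page (1024 DW) fits in a single
+	 * loop; the extra 11 DW reserved here leave room for the sync and
+	 * fence packets emitted around the copy.
+	 */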
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & RADEON_RESET_DMA)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 0000000..f766c96
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ *          Rafał Miłecki
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/* enable the audio stream */
+void dce4_audio_enable(struct radeon_device *rdev,
+			      struct r600_audio_pin *pin,
+			      u8 enable_mask)
+{
+	u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
+
+	if (!pin)
+		return;
+
+	if (enable_mask) {
+		tmp |= AUDIO_ENABLED;
+		if (enable_mask & 1)
+			tmp |= PIN0_AUDIO_ENABLED;
+		if (enable_mask & 2)
+			tmp |= PIN1_AUDIO_ENABLED;
+		if (enable_mask & 4)
+			tmp |= PIN2_AUDIO_ENABLED;
+		if (enable_mask & 8)
+			tmp |= PIN3_AUDIO_ENABLED;
+	} else {
+		tmp &= ~(AUDIO_ENABLED |
+			 PIN0_AUDIO_ENABLED |
+			 PIN1_AUDIO_ENABLED |
+			 PIN2_AUDIO_ENABLED |
+			 PIN3_AUDIO_ENABLED);
+	}
+
+	WREG32(AZ_HOT_PLUG_CONTROL, tmp);
+}
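+
+/* Usage note: enable_mask is a per-pin bitmask, e.g. a mask of 0x3 enables
+ * audio pins 0 and 1, while a mask of 0 disables every pin.
+ */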
+
+void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int bpc = 8;
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		bpc = radeon_crtc->bpc;
+	}
+
+	if (bpc > 8)
+		WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+			HDMI_ACR_AUTO_SEND);	/* allow hw to send ACR packets when required */
+	else
+		WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+			HDMI_ACR_SOURCE |		/* select SW CTS value */
+			HDMI_ACR_AUTO_SEND);	/* allow hw to send ACR packets when required */
+
+	WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr->cts_32khz));
+	WREG32(HDMI_ACR_32_1 + offset, acr->n_32khz);
+
+	WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr->cts_44_1khz));
+	WREG32(HDMI_ACR_44_1 + offset, acr->n_44_1khz);
+
+	WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr->cts_48khz));
+	WREG32(HDMI_ACR_48_1 + offset, acr->n_48khz);
+}
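+
+/* Background (HDMI audio clock regeneration): the sink recovers the audio
+ * clock from 128 * audio_rate = tmds_clock * N / CTS, which is why a
+ * separate N/CTS pair is programmed above for each base sample rate.
+ */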
+
+void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+		struct drm_connector *connector, struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	u32 tmp = 0;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
+void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+	u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	u32 tmp;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set HDMI mode */
+	tmp |= HDMI_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+	u8 *sadb, int sad_count)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	u32 tmp;
+
+	/* program the speaker allocation */
+	tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set DP mode */
+	tmp |= DP_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+}
+
+void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
+	struct cea_sad *sads, int sad_count)
+{
+	int i;
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					stereo_freqs |= sad->freq;
+				else
+					break;
+			}
+		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+		WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value);
+	}
+}
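+
+/* Selection rule above, by example: if the EDID carries both a 2 channel
+ * and an 8 channel PCM short audio descriptor, the 8 channel one wins the
+ * descriptor register, while the stereo frequency mask still accumulates
+ * the rates from every PCM SAD.
+ */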
+
+/*
+ * build an AVI Info Frame
+ */
+void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
+			      unsigned char *buffer, size_t size)
+{
+	uint8_t *frame = buffer + 3;
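+	/* buffer holds the packed AVI infoframe: a 3 byte header (type,
+	 * version, length) followed by the checksum and payload, so frame
+	 * points at the checksum byte that lands in the low byte of
+	 * AFMT_AVI_INFO0 below.
+	 */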
+
+	WREG32(AFMT_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(AFMT_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(AFMT_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(AFMT_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
+	WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+		 HDMI_AVI_INFO_LINE(2),	/* anything other than 0 */
+		 ~HDMI_AVI_INFO_LINE_MASK);
+}
+
+void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock)
+{
+	unsigned int max_ratio = clock / 24000;
+	u32 dto_phase;
+	u32 wallclock_ratio;
+	u32 value;
+
+	if (max_ratio >= 8) {
+		dto_phase = 192 * 1000;
+		wallclock_ratio = 3;
+	} else if (max_ratio >= 4) {
+		dto_phase = 96 * 1000;
+		wallclock_ratio = 2;
+	} else if (max_ratio >= 2) {
+		dto_phase = 48 * 1000;
+		wallclock_ratio = 1;
+	} else {
+		dto_phase = 24 * 1000;
+		wallclock_ratio = 0;
+	}
+
+	value = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+	value |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+	value &= ~DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+	WREG32(DCCG_AUDIO_DTO0_CNTL, value);
+
+	/* Two dtos; generally use dto0 for HDMI */
+	value = 0;
+
+	if (crtc)
+		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+	WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+}
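+
+/* Worked example for the DTO above: a 148.5 MHz pixel clock (clock ==
+ * 148500 kHz) gives max_ratio 6, so wallclock_ratio 2 and dto_phase 96000
+ * are paired with module == clock, preserving the 24 MHz based ratio once
+ * the faster wallclock is factored in.
+ */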
+
+void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+			   struct radeon_crtc *crtc, unsigned int clock)
+{
+	u32 value;
+
+	value = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+	value |= DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+	WREG32(DCCG_AUDIO_DTO1_CNTL, value);
+
+	/* Two dtos; generally use dto1 for DP */
+	value = 0;
+	value |= DCCG_AUDIO_DTO_SEL;
+
+	if (crtc)
+		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
+
+	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
+
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	if (ASIC_IS_DCE41(rdev)) {
+		unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+		div = radeon_audio_decode_dfs_div(div);
+
+		if (div)
+			clock = 100 * clock / div;
+	}
+
+	WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+	WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+}
+
+void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+		HDMI_NULL_SEND |	/* send null packets when required */
+		HDMI_GC_SEND |		/* send general control packets */
+		HDMI_GC_CONT);		/* send general control packets every frame */
+}
+
+void dce4_hdmi_set_color_depth(struct drm_encoder *encoder, u32 offset, int bpc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	uint32_t val;
+
+	val = RREG32(HDMI_CONTROL + offset);
+	val &= ~HDMI_DEEP_COLOR_ENABLE;
+	val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
+
+	switch (bpc) {
+	case 0:
+	case 6:
+	case 8:
+	case 16:
+	default:
+		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+			  connector->name, bpc);
+		break;
+	case 10:
+		val |= HDMI_DEEP_COLOR_ENABLE;
+		val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
+		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+			  connector->name);
+		break;
+	case 12:
+		val |= HDMI_DEEP_COLOR_ENABLE;
+		val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
+		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+			  connector->name);
+		break;
+	}
+
+	WREG32(HDMI_CONTROL + offset, val);
+}
+
+void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+		AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+	WREG32(AFMT_60958_0 + offset,
+		AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+	WREG32(AFMT_60958_1 + offset,
+		AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+	WREG32(AFMT_60958_2 + offset,
+		AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+		AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+		AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+		AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+		AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+		AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+		AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+	/* allow 60958 channel status and send audio packets fields to be updated */
+	WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+		  AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
+}
+
+void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (mute)
+		WREG32_OR(HDMI_GC + offset, HDMI_GC_AVMUTE);
+	else
+		WREG32_AND(HDMI_GC + offset, ~HDMI_GC_AVMUTE);
+}
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (enable) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+		if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+			WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+			       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+			       HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+			       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+			       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+			WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+				  AFMT_AUDIO_SAMPLE_SEND);
+		} else {
+			WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+			       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+			       HDMI_AVI_INFO_CONT); /* send AVI info frames every frame/field */
+			WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+				   ~AFMT_AUDIO_SAMPLE_SEND);
+		}
+	} else {
+		WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+			   ~AFMT_AUDIO_SAMPLE_SEND);
+		WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
+	}
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
+
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (enable && connector &&
+	    drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector;
+		uint32_t val;
+
+		WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+			  AFMT_AUDIO_SAMPLE_SEND);
+
+		WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+		       EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+
+		if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
+			dig_connector = radeon_connector->con_priv;
+			val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
+			val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
+
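+			/* Base multiple sketch (an assumption from the link
+			 * rates): DP audio N uses a base multiple of 3 on the
+			 * 162 MHz (RBR) link symbol clock and 5 on the faster
+			 * rates.
+			 */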
+			if (dig_connector->dp_clock == 162000)
+				val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(3);
+			else
+				val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
+
+			WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
+		}
+
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+			EVERGREEN_DP_SEC_ASP_ENABLE |		/* Audio packet transmission */
+			EVERGREEN_DP_SEC_ATP_ENABLE |		/* Audio timestamp packet transmission */
+			EVERGREEN_DP_SEC_AIP_ENABLE |		/* Audio infoframe packet transmission */
+			EVERGREEN_DP_SEC_STREAM_ENABLE);	/* Master enable for secondary stream engine */
+	} else {
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
+		WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+			   ~AFMT_AUDIO_SAMPLE_SEND);
+	}
+
+	dig->afmt->enabled = enable;
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 0000000..b436bad
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* trinity */
+#define TN_SMC_IND_INDEX_0                              0x200
+#define TN_SMC_IND_DATA_0                               0x204
+
+/* evergreen */
+#define EVERGREEN_PIF_PHY0_INDEX                        0x8
+#define EVERGREEN_PIF_PHY0_DATA                         0xc
+#define EVERGREEN_PIF_PHY1_INDEX                        0x10
+#define EVERGREEN_PIF_PHY1_DATA                         0x14
+#define EVERGREEN_MM_INDEX_HI                           0x18
+
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
+#define EVERGREEN_D3VGA_CONTROL                         0x3e0
+#define EVERGREEN_D4VGA_CONTROL                         0x3e4
+#define EVERGREEN_D5VGA_CONTROL                         0x3e8
+#define EVERGREEN_D6VGA_CONTROL                         0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL                         0x414
+#define EVERGREEN_P2PLL_SS_CNTL                         0x454
+#       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL			0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
+
+#define EVERGREEN_CG_IND_ADDR                           0x8f8
+#define EVERGREEN_CG_IND_DATA                           0x8fc
+
+#define EVERGREEN_AUDIO_ENABLE				0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
+
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE                           0x6800
+#define EVERGREEN_GRPH_CONTROL                          0x6804
+#       define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_DEPTH_8BPP                0
+#       define EVERGREEN_GRPH_DEPTH_16BPP               1
+#       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define EVERGREEN_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#       define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#       define EVERGREEN_GRPH_FORMAT_AI88               3
+#       define EVERGREEN_GRPH_FORMAT_MONO16             4
+#       define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#       define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#       define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#       define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#       define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x6808
+#       define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
+#       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+#       define EVERGREEN_GRPH_RED_CROSSBAR(x)           (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_RED_SEL_R                 0
+#       define EVERGREEN_GRPH_RED_SEL_G                 1
+#       define EVERGREEN_GRPH_RED_SEL_B                 2
+#       define EVERGREEN_GRPH_RED_SEL_A                 3
+#       define EVERGREEN_GRPH_GREEN_CROSSBAR(x)         (((x) & 0x3) << 6)
+#       define EVERGREEN_GRPH_GREEN_SEL_G               0
+#       define EVERGREEN_GRPH_GREEN_SEL_B               1
+#       define EVERGREEN_GRPH_GREEN_SEL_A               2
+#       define EVERGREEN_GRPH_GREEN_SEL_R               3
+#       define EVERGREEN_GRPH_BLUE_CROSSBAR(x)          (((x) & 0x3) << 8)
+#       define EVERGREEN_GRPH_BLUE_SEL_B                0
+#       define EVERGREEN_GRPH_BLUE_SEL_A                1
+#       define EVERGREEN_GRPH_BLUE_SEL_R                2
+#       define EVERGREEN_GRPH_BLUE_SEL_G                3
+#       define EVERGREEN_GRPH_ALPHA_CROSSBAR(x)         (((x) & 0x3) << 10)
+#       define EVERGREEN_GRPH_ALPHA_SEL_A               0
+#       define EVERGREEN_GRPH_ALPHA_SEL_R               1
+#       define EVERGREEN_GRPH_ALPHA_SEL_G               2
+#       define EVERGREEN_GRPH_ALPHA_SEL_B               3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x6814
+#       define EVERGREEN_GRPH_DFQ_ENABLE                (1 << 0)
+#       define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+#define EVERGREEN_GRPH_PITCH                            0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x6828
+#define EVERGREEN_GRPH_X_START                          0x682c
+#define EVERGREEN_GRPH_Y_START                          0x6830
+#define EVERGREEN_GRPH_X_END                            0x6834
+#define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x6998
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x699c
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x69a4
+#define EVERGREEN_CUR_POSITION                          0x69a8
+#define EVERGREEN_CUR_HOT_SPOT                          0x69ac
+#define EVERGREEN_CUR_COLOR1                            0x69b0
+#define EVERGREEN_CUR_COLOR2                            0x69b4
+#define EVERGREEN_CUR_UPDATE                            0x69b8
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE                        0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR                      0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA                       0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR                       0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE              0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL                       0x69fc
+#define EVERGREEN_DC_LUT_CONTROL                        0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x6a18
+
+#define EVERGREEN_DATA_FORMAT                           0x6b00
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT                        0x6b04
+#define EVERGREEN_VLINE_START_END                       0x6b08
+#define EVERGREEN_VLINE_STATUS                          0x6bb8
+#       define EVERGREEN_VLINE_STAT                     (1 << 12)
+
+#define EVERGREEN_VIEWPORT_START                        0x6d70
+#define EVERGREEN_VIEWPORT_SIZE                         0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET                 (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET                 (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET                 (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET                 (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET                 (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)
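+
+/* Usage sketch: per-crtc registers are addressed by adding one of the
+ * offsets above to the crtc0 register address, e.g. (illustrative)
+ *   RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET)
+ * reads the status of crtc 2.
+ */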
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x6e34
+#define EVERGREEN_CRTC_CONTROL                          0x6e70
+#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_STATUS                           0x6e8c
+#       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
+
+#define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A                         0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
+
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE				0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL                               0x7000
+#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
+#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
+
+
+#define NI_DIG_BE_CNTL                    0x7140
+#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8) & 0x3F)
+#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL                              0x7144
+#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
+#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
+#       define NI_DIG_BE_DPSST 0
+
+/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
+#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
+#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO                         0x7310
+#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
+#define EVERGREEN_DP_SEC_CNTL                           0x7280
+#       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
+#       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
+#       define EVERGREEN_DP_SEC_ATP_ENABLE              (1 << 8)
+#       define EVERGREEN_DP_SEC_AIP_ENABLE              (1 << 12)
+#       define EVERGREEN_DP_SEC_GSP_ENABLE              (1 << 20)
+#       define EVERGREEN_DP_SEC_AVI_ENABLE              (1 << 24)
+#       define EVERGREEN_DP_SEC_MPG_ENABLE              (1 << 28)
+#define EVERGREEN_DP_SEC_TIMESTAMP                      0x72a4
+#       define EVERGREEN_DP_SEC_TIMESTAMP_MODE(x)       (((x) & 0x3) << 0)
+#define EVERGREEN_DP_SEC_AUD_N                          0x7294
+#       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
+#       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
+
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
+#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
new file mode 100644
index 0000000..3a03ba3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __EVERGREEN_SMC_H__
+#define __EVERGREEN_SMC_H__
+
+#include "rv770_smc.h"
+
+#pragma pack(push, 1)
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+
+struct SMC_Evergreen_MCRegisterSet
+{
+    uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+    SMC_Evergreen_MCRegisterSet         data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
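+
+/* Size check (a sketch): with pack(1) in effect the table above occupies
+ * 1 + 3 + 16 * 4 + 5 * 64 = 388 bytes, which is why the pragma pack
+ * wrapper matters for matching the SMC firmware layout.
+ */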
+
+#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
+
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x8
+#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable      0xC
+#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
+
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 0000000..f3d88ca
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,2628 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS           256
+#define EVERGREEN_MAX_TEMP_GPRS         16
+#define EVERGREEN_MAX_SH_THREADS        256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES  4096
+#define EVERGREEN_MAX_FRC_EOV_CNT       16384
+#define EVERGREEN_MAX_BACKENDS          8
+#define EVERGREEN_MAX_BACKENDS_MASK     0xFF
+#define EVERGREEN_MAX_SIMDS             16
+#define EVERGREEN_MAX_SIMDS_MASK        0xFFFF
+#define EVERGREEN_MAX_PIPES             8
+#define EVERGREEN_MAX_PIPES_MASK        0xFF
+#define EVERGREEN_MAX_LDS_NUM           0xFFFF
+
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
+
+/* pm registers */
+#define	SMC_MSG						0x20c
+#define		HOST_SMC_MSG(x)				((x) << 0)
+#define		HOST_SMC_MSG_MASK			(0xff << 0)
+#define		HOST_SMC_MSG_SHIFT			0
+#define		HOST_SMC_RESP(x)			((x) << 8)
+#define		HOST_SMC_RESP_MASK			(0xff << 8)
+#define		HOST_SMC_RESP_SHIFT			8
+#define		SMC_HOST_MSG(x)				((x) << 16)
+#define		SMC_HOST_MSG_MASK			(0xff << 16)
+#define		SMC_HOST_MSG_SHIFT			16
+#define		SMC_HOST_RESP(x)			((x) << 24)
+#define		SMC_HOST_RESP_MASK			(0xff << 24)
+#define		SMC_HOST_RESP_SHIFT			24
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x4fc
+#define		DCCG_DISP1_SLOW_SELECT(x)		((x) << 0)
+#define		DCCG_DISP1_SLOW_SELECT_MASK		(7 << 0)
+#define		DCCG_DISP1_SLOW_SELECT_SHIFT		0
+#define		DCCG_DISP2_SLOW_SELECT(x)		((x) << 4)
+#define		DCCG_DISP2_SLOW_SELECT_MASK		(7 << 4)
+#define		DCCG_DISP2_SLOW_SELECT_SHIFT		4
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SCLK_MUX_UPDATE				(1 << 26)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_STATUS					0x60c
+#define		SPLL_CHG_STATUS				(1 << 1)
+
+#define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#       define SS_SSEN                                  (1 << 24)
+#       define SS_DSMODE_EN                             (1 << 25)
+
+#define	MPLL_AD_FUNC_CNTL				0x624
+#define		CLKF(x)					((x) << 0)
+#define		CLKF_MASK				(0x7f << 0)
+#define		CLKR(x)					((x) << 7)
+#define		CLKR_MASK				(0x1f << 7)
+#define		CLKFRAC(x)				((x) << 12)
+#define		CLKFRAC_MASK				(0x1f << 12)
+#define		YCLK_POST_DIV(x)			((x) << 17)
+#define		YCLK_POST_DIV_MASK			(3 << 17)
+#define		IBIAS(x)				((x) << 20)
+#define		IBIAS_MASK				(0x3ff << 20)
+#define		RESET					(1 << 30)
+#define		PDNB					(1 << 31)
+#define	MPLL_AD_FUNC_CNTL_2				0x628
+#define		BYPASS					(1 << 19)
+#define		BIAS_GEN_PDNB				(1 << 24)
+#define		RESET_EN				(1 << 25)
+#define		VCO_MODE				(1 << 29)
+#define	MPLL_DQ_FUNC_CNTL				0x62c
+#define	MPLL_DQ_FUNC_CNTL_2				0x630
+
+#define GENERAL_PWRMGT                                  0x63c
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define ENABLE_GEN2PCIE                          (1 << 4)
+#       define ENABLE_GEN2XSP                           (1 << 5)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (3 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define LOW_VOLT_D2_ACPI                         (1 << 8)
+#       define LOW_VOLT_D3_ACPI                         (1 << 9)
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define BACKBIAS_PAD_EN                          (1 << 18)
+#       define BACKBIAS_VALUE                           (1 << 19)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#       define AC_DC_SW                                 (1 << 24)
+
+#define SCLK_PWRMGT_CNTL                                  0x644
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+#define	MCLK_PWRMGT_CNTL				0x648
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define MPLL_PWRMGT_OFF                          (1 << 5)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCKA0_PDNB                             (1 << 8)
+#       define MRDCKA1_PDNB                             (1 << 9)
+#       define MRDCKB0_PDNB                             (1 << 10)
+#       define MRDCKB1_PDNB                             (1 << 11)
+#       define MRDCKC0_PDNB                             (1 << 12)
+#       define MRDCKC1_PDNB                             (1 << 13)
+#       define MRDCKD0_PDNB                             (1 << 14)
+#       define MRDCKD1_PDNB                             (1 << 15)
+#       define MRDCKA0_RESET                            (1 << 16)
+#       define MRDCKA1_RESET                            (1 << 17)
+#       define MRDCKB0_RESET                            (1 << 18)
+#       define MRDCKB1_RESET                            (1 << 19)
+#       define MRDCKC0_RESET                            (1 << 20)
+#       define MRDCKC1_RESET                            (1 << 21)
+#       define MRDCKD0_RESET                            (1 << 22)
+#       define MRDCKD1_RESET                            (1 << 23)
+#       define DLL_READY_READ                           (1 << 24)
+#       define USE_DISPLAY_GAP                          (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                (1 << 26)
+#       define MPLL_TURNOFF_D2                          (1 << 28)
+#define	DLL_CNTL					0x64c
+#       define MRDCKA0_BYPASS                           (1 << 24)
+#       define MRDCKA1_BYPASS                           (1 << 25)
+#       define MRDCKB0_BYPASS                           (1 << 26)
+#       define MRDCKB1_BYPASS                           (1 << 27)
+#       define MRDCKC0_BYPASS                           (1 << 28)
+#       define MRDCKC1_BYPASS                           (1 << 29)
+#       define MRDCKD0_BYPASS                           (1 << 30)
+#       define MRDCKD1_BYPASS                           (1 << 31)
+
+#define CG_AT                                           0x6d4
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x714
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define	CG_BIF_REQ_AND_RSP				0x7f4
+#define		CG_CLIENT_REQ(x)			((x) << 0)
+#define		CG_CLIENT_REQ_MASK			(0xff << 0)
+#define		CG_CLIENT_REQ_SHIFT			0
+#define		CG_CLIENT_RESP(x)			((x) << 8)
+#define		CG_CLIENT_RESP_MASK			(0xff << 8)
+#define		CG_CLIENT_RESP_SHIFT			8
+#define		CLIENT_CG_REQ(x)			((x) << 16)
+#define		CLIENT_CG_REQ_MASK			(0xff << 16)
+#define		CLIENT_CG_REQ_SHIFT			16
+#define		CLIENT_CG_RESP(x)			((x) << 24)
+#define		CLIENT_CG_RESP_MASK			(0xff << 24)
+#define		CLIENT_CG_RESP_SHIFT			24
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x790
+#define		SSEN					(1 << 0)
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
+
+#define	MPLL_SS1					0x85c
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0x860
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	CG_IND_ADDR					0x8f8
+#define	CG_IND_DATA					0x8fc
+/* CGIND regs */
+#define	CG_CGTT_LOCAL_0					0x00
+#define	CG_CGTT_LOCAL_1					0x01
+#define	CG_CGTT_LOCAL_2					0x02
+#define	CG_CGTT_LOCAL_3					0x03
+#define	CG_CGLS_TILE_0					0x20
+#define	CG_CGLS_TILE_1					0x21
+#define	CG_CGLS_TILE_2					0x22
+#define	CG_CGLS_TILE_3					0x23
+#define	CG_CGLS_TILE_4					0x24
+#define	CG_CGLS_TILE_5					0x25
+#define	CG_CGLS_TILE_6					0x26
+#define	CG_CGLS_TILE_7					0x27
+#define	CG_CGLS_TILE_8					0x28
+#define	CG_CGLS_TILE_9					0x29
+#define	CG_CGLS_TILE_10					0x2a
+#define	CG_CGLS_TILE_11					0x2b
+
+#define VM_L2_CG                                        0x15c0
+
+#define MC_CONFIG                                       0x2000
+
+#define MC_CONFIG_MCD                                   0x20a0
+#define MC_CG_CONFIG_MCD                                0x20a4
+#define		MC_RD_ENABLE_MCD(x)			((x) << 8)
+#define		MC_RD_ENABLE_MCD_MASK			(7 << 8)
+
+#define MC_HUB_MISC_HUB_CG                              0x20b8
+#define MC_HUB_MISC_VM_CG                               0x20bc
+#define MC_HUB_MISC_SIP_CG                              0x20c0
+
+#define MC_XPB_CLK_GAT                                  0x2478
+
+#define MC_CG_CONFIG                                    0x25bc
+#define		MC_RD_ENABLE(x)				((x) << 4)
+#define		MC_RD_ENABLE_MASK			(3 << 4)
+
+#define MC_CITF_MISC_RD_CG                              0x2648
+#define MC_CITF_MISC_WR_CG                              0x264c
+#define MC_CITF_MISC_VM_CG                              0x2650
+#       define MEM_LS_ENABLE                            (1 << 19)
+
+#define MC_ARB_BURST_TIME                               0x2808
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+
+#define MC_SEQ_RAS_TIMING                               0x28a0
+#define MC_SEQ_CAS_TIMING                               0x28a4
+#define MC_SEQ_MISC_TIMING                              0x28a8
+#define MC_SEQ_MISC_TIMING2                             0x28ac
+
+#define MC_SEQ_RD_CTL_D0                                0x28b4
+#define MC_SEQ_RD_CTL_D1                                0x28b8
+#define MC_SEQ_WR_CTL_D0                                0x28bc
+#define MC_SEQ_WR_CTL_D1                                0x28c0
+
+#define MC_SEQ_STATUS_M                                 0x29f4
+#       define PMG_PWRSTATE                             (1 << 16)
+
+#define MC_SEQ_MISC1                                    0x2a04
+#define MC_SEQ_RESERVE_M                                0x2a08
+#define MC_PMG_CMD_EMRS                                 0x2a0c
+
+#define MC_SEQ_MISC3                                    0x2a2c
+
+#define MC_SEQ_MISC5                                    0x2a54
+#define MC_SEQ_MISC6                                    0x2a58
+
+#define MC_SEQ_MISC7                                    0x2a64
+
+#define MC_SEQ_CG                                       0x2a68
+#define		CG_SEQ_REQ(x)				((x) << 0)
+#define		CG_SEQ_REQ_MASK				(0xff << 0)
+#define		CG_SEQ_REQ_SHIFT			0
+#define		CG_SEQ_RESP(x)				((x) << 8)
+#define		CG_SEQ_RESP_MASK			(0xff << 8)
+#define		CG_SEQ_RESP_SHIFT			8
+#define		SEQ_CG_REQ(x)				((x) << 16)
+#define		SEQ_CG_REQ_MASK				(0xff << 16)
+#define		SEQ_CG_REQ_SHIFT			16
+#define		SEQ_CG_RESP(x)				((x) << 24)
+#define		SEQ_CG_RESP_MASK			(0xff << 24)
+#define		SEQ_CG_RESP_SHIFT			24
+#define MC_SEQ_RAS_TIMING_LP                            0x2a6c
+#define MC_SEQ_CAS_TIMING_LP                            0x2a70
+#define MC_SEQ_MISC_TIMING_LP                           0x2a74
+#define MC_SEQ_MISC_TIMING2_LP                          0x2a78
+#define MC_SEQ_WR_CTL_D0_LP                             0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP                             0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP                           0x2a88
+
+#define MC_PMG_CMD_MRS                                  0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP                             0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP                             0x2b20
+
+#define MC_PMG_CMD_MRS1                                 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0x2b48
+
+#define CGTS_SM_CTRL_REG                                0x9150
+
+/* Registers */
+
+#define RCU_IND_INDEX           			0x100
+#define RCU_IND_DATA            			0x104
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL				0x718
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_VCO_MODE_MASK			0x00000200
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define CG_UPLL_FUNC_CNTL_2				0x71c
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define CG_UPLL_FUNC_CNTL_3				0x720
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4				0x854
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM				0x79c
+#	define SSEN_MASK				0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL                                    0x610
+#       define DCLK_DIVIDER_MASK                        0x7f
+#       define DCLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_DCLK_STATUS                                  0x614
+#       define DCLK_STATUS                              (1 << 0)
+#define CG_VCLK_CNTL                                    0x618
+#define CG_VCLK_STATUS                                  0x61c
+#define	CG_SCRATCH1					0x820
+
+#define RLC_CNTL                                        0x3f00
+#       define RLC_ENABLE                               (1 << 0)
+#       define GFX_POWER_GATING_ENABLE                  (1 << 7)
+#       define GFX_POWER_GATING_SRC                     (1 << 8)
+#       define DYN_PER_SIMD_PG_ENABLE                   (1 << 27)
+#       define LB_CNT_SPIM_ACTIVE                       (1 << 30)
+#       define LOAD_BALANCE_ENABLE                      (1 << 31)
+
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE                      0x3f10
+#define TN_RLC_LB_CNTR_MAX                                0x3f14
+#define TN_RLC_LB_CNTR_INIT                               0x3f18
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
+#define TN_RLC_LB_INIT_SIMD_MASK                          0x3fe4
+#define TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK                 0x3fe8
+#define TN_RLC_LB_PARAMS                                  0x3fec
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
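+
+/* Editor's illustrative sketch, not part of the original header:
+ * GRBM_GFX_INDEX steers subsequent GRBM register accesses at a single
+ * SE/instance or at all of them.  A typical select/restore pattern,
+ * assuming the driver's WREG32() accessor:
+ */
+#if 0	/* example only */
+static inline void example_select_se(struct radeon_device *rdev, u32 se)
+{
+	/* aim the following writes at one shader engine, all instances */
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(se));
+	/* ... program per-SE registers here ... */
+	/* restore broadcast so later writes hit every SE again */
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+}
+#endif
+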
+#define RLC_GFX_INDEX           			0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define		WRITE_DIS      				(1 << 0)
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x0000000f
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		ROW_SIZE(x)             		((x) << 28)
+#define GB_BACKEND_MAP  				0x98FC
+#define DMIF_ADDR_CONFIG  				0xBD4
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL  					0x2F4C
+#define		HDP_FLUSH_INVALIDATE_CACHE      	(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+
+#define	CP_COHER_CNTL					0x85F0
+#define	CP_COHER_SIZE					0x85F4
+#define	CP_COHER_BASE					0x85F8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_DEBUG					0xC1FC
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE             0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL         (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE             0x05b0
+#define DCCG_AUDIO_DTO0_MODULE            0x05b4
+#define DCCG_AUDIO_DTO0_LOAD              0x05b8
+#define DCCG_AUDIO_DTO0_CNTL              0x05bc
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
+
+#define DCCG_AUDIO_DTO1_PHASE             0x05c0
+#define DCCG_AUDIO_DTO1_MODULE            0x05c4
+#define DCCG_AUDIO_DTO1_LOAD              0x05c8
+#define DCCG_AUDIO_DTO1_CNTL              0x05cc
+#       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+
+#define DCE41_DENTIST_DISPCLK_CNTL			0x049c
+#       define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL                         0x7030
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#       define HDMI_DEEP_COLOR_ENABLE        (1 << 24)
+#       define HDMI_DEEP_COLOR_DEPTH(x)      (((x) & 3) << 28)
+#       define HDMI_24BIT_DEEP_COLOR         0
+#       define HDMI_30BIT_DEEP_COLOR         1
+#       define HDMI_36BIT_DEEP_COLOR         2
+#       define HDMI_DEEP_COLOR_DEPTH_MASK    (3 << 28)
+#define HDMI_STATUS                          0x7034
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7038
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x703c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#       define HDMI_ACR_N_MULTIPLE(x)        (((x) & 7) << 16)
+#       define HDMI_ACR_X1                   1
+#       define HDMI_ACR_X2                   2
+#       define HDMI_ACR_X4                   4
+#       define HDMI_ACR_AUDIO_PRIORITY       (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL              0x7040
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7044
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7048
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AVI_INFO_LINE_MASK       (0x3f << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x704c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7058
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#       define HDMI_GC_AVMUTE_CONT           (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x705c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7084
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7088
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_CN(x)           (((x) & 0x3) << 12)
+#       define AFMT_AVI_INFO_YQ(x)           (((x) & 0x3) << 14)
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x708c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7090
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7094
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7098
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x709c
+#define AFMT_GENERIC0_0                      0x70a0
+#define AFMT_GENERIC0_1                      0x70a4
+#define AFMT_GENERIC0_2                      0x70a8
+#define AFMT_GENERIC0_3                      0x70ac
+#define AFMT_GENERIC0_4                      0x70b0
+#define AFMT_GENERIC0_5                      0x70b4
+#define AFMT_GENERIC0_6                      0x70b8
+#define AFMT_GENERIC1_HDR                    0x70bc
+#define AFMT_GENERIC1_0                      0x70c0
+#define AFMT_GENERIC1_1                      0x70c4
+#define AFMT_GENERIC1_2                      0x70c8
+#define AFMT_GENERIC1_3                      0x70cc
+#define AFMT_GENERIC1_4                      0x70d0
+#define AFMT_GENERIC1_5                      0x70d4
+#define AFMT_GENERIC1_6                      0x70d8
+#define HDMI_ACR_32_0                        0x70dc
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x70e0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x70e4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x70e8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x70ec
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x70f0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0                    0x70f4
+#define HDMI_ACR_STATUS_1                    0x70f8
+#define AFMT_AUDIO_INFO0                     0x70fc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CT(x)         (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#       define AFMT_AUDIO_INFO_CXT(x)        (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1                     0x7100
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#       define AFMT_AUDIO_INFO_LFEBPL(x)     (((x) & 3) << 16)
+#define AFMT_60958_0                         0x7104
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x7108
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x710c
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x7110
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1 << 31)
+#define AFMT_RAMP_CONTROL1                   0x7114
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x7118
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x711c
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x7120
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7128
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AUDIO_HBR_ENABLE         (1 << 8)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x712c
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7130
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x7134
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - afmt regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7138
+
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER          0x5f78
+#define		SPEAKER_ALLOCATION(x)			(((x) & 0x7f) << 0)
+#define		SPEAKER_ALLOCATION_MASK			(0x7f << 0)
+#define		SPEAKER_ALLOCATION_SHIFT		0
+#define		HDMI_CONNECTION				(1 << 16)
+#define		DP_CONNECTION				(1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
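+
+/* Editor's illustrative sketch, not part of the original header: with the
+ * bit layout above, a stereo LPCM descriptor advertising 32/44.1/48 kHz
+ * could be packed as follows:
+ */
+#if 0	/* example only */
+static inline u32 example_lpcm_descriptor(void)
+{
+	/* MAX_CHANNELS() takes channels - 1, so 1 means 2 channels */
+	return MAX_CHANNELS(1) |
+	       SUPPORTED_FREQUENCIES(0x07) |	   /* bits 0-2: 32/44.1/48 kHz */
+	       SUPPORTED_FREQUENCIES_STEREO(0x07); /* same rates for stereo */
+}
+#endif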
+
+#define AZ_CHANNEL_COUNT_CONTROL                          0x5fe4
+#       define HBR_CHANNEL_COUNT(x)                       (((x) & 0x7) << 0)
+#       define COMPRESSED_CHANNEL_COUNT(x)                (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0   = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC         0x5fe8
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR             0x5fec
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+#       define DISPLAY0_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY_TYPE_NONE                   0
+#       define DISPLAY_TYPE_HDMI                   1
+#       define DISPLAY_TYPE_DP                     2
+#       define DISPLAY0_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY1_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY1_ID(x)                             (((x) & 0x3f) << 10)
+#       define DISPLAY2_TYPE(x)                           (((x) & 0x3) << 16)
+#       define DISPLAY2_ID(x)                             (((x) & 0x3f) << 18)
+#       define DISPLAY3_TYPE(x)                           (((x) & 0x3) << 24)
+#       define DISPLAY3_ID(x)                             (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+#       define DISPLAY4_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY4_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY5_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY5_ID(x)                             (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER       0x5ffc
+#       define NUMBER_OF_DISPLAY_ID(x)                    (((x) & 0x7) << 0)
+
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VC					(1 << 13)
+#define		SOFT_RESET_VGT					(1 << 14)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+/* evergreen */
+#define	CG_THERMAL_CTRL					0x72c
+#define		TOFFSET_MASK			        0x00003FE0
+#define		TOFFSET_SHIFT			        5
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+
+#define	CG_THERMAL_INT					0x734
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
+
+#define	TN_CG_THERMAL_INT_CTRL				0x738
+#define		TN_DIG_THERM_INTH(x)			((x) << 0)
+#define		TN_DIG_THERM_INTH_MASK			0x000000FF
+#define		TN_DIG_THERM_INTH_SHIFT			0
+#define		TN_DIG_THERM_INTL(x)			((x) << 8)
+#define		TN_DIG_THERM_INTL_MASK			0x0000FF00
+#define		TN_DIG_THERM_INTL_SHIFT			8
+#define 	TN_THERM_INT_MASK_HIGH			(1 << 24)
+#define 	TN_THERM_INT_MASK_LOW			(1 << 25)
+
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x07FF0000
+#define		ASIC_T_SHIFT			        16
+#define	CG_TS0_STATUS					0x760
+#define		TS0_ADC_DOUT_MASK			0x000003FF
+#define		TS0_ADC_DOUT_SHIFT			0
+
+/* APU */
+#define	CG_THERMAL_STATUS			        0x678
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define		BLACKOUT_MODE_MASK			0x00000007
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define	FUS_MC_ARB_RAMCFG				0x2768
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_FUS_VM_FB_OFFSET				0x2898
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+
+#define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
+#define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
+#define	FUS_MC_VM_MD_L1_TLB2_CNTL			0x2664
+
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_ENHANCE					0x8BF0
+#define PA_SC_AA_CONFIG					0x28C04
+#define         MSAA_NUM_SAMPLES_SHIFT                  0
+#define         MSAA_NUM_SAMPLES_MASK                   0x3
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		CS_PRIO(x)					((x) << 18)
+#define		LS_PRIO(x)					((x) << 20)
+#define		HS_PRIO(x)					((x) << 22)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
+#define		NUM_HS_GPRS(x)					((x) << 0)
+#define		NUM_LS_GPRS(x)					((x) << 16)
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_1			0x8C10
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_2			0x8C14
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C18
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+#define	SQ_THREAD_RESOURCE_MGMT_2			0x8C1C
+#define		NUM_HS_THREADS(x)				((x) << 0)
+#define		NUM_LS_THREADS(x)				((x) << 8)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C20
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C24
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_3			0x8C28
+#define		NUM_HS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_LS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define	SQ_DYN_GPR_SIMD_LOCK_EN    			0x8D94
+#define	SQ_STATIC_THREAD_MGMT_1    			0x8E20
+#define	SQ_STATIC_THREAD_MGMT_2    			0x8E24
+#define	SQ_STATIC_THREAD_MGMT_3    			0x8E28
+#define	SQ_LDS_RESOURCE_MGMT    			0x8E2C
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_MISC						0x28350
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
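+
+/* Editor's illustrative sketch, not part of the original header: bringing
+ * up the system VM context roughly follows this pattern, assuming the
+ * driver's WREG32() accessor and a flat (one-level) page table whose base
+ * is programmed in units of 4K pages:
+ */
+#if 0	/* example only */
+static inline void example_vm_context0_enable(struct radeon_device *rdev,
+					      u64 table_addr)
+{
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+}
+#endif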
+
+#define	WAIT_UNTIL					0x8040
+
+#define	SRBM_STATUS				        0x0E50
+#define		RLC_RQ_PENDING 				(1 << 3)
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		RLC_BUSY 				(1 << 15)
+#define		IH_BUSY 				(1 << 17)
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SRBM_SOFT_RESET_ALL_MASK    	       	0x00FEEFA6
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
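+/* Editor's illustrative sketch, not part of the original header: SRBM soft
+ * resets are pulsed rather than latched, so a block reset sets the bit,
+ * waits, then clears it again (delays are placeholders):
+ */
+#if 0	/* example only */
+static inline void example_srbm_reset_vmc(struct radeon_device *rdev)
+{
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_VMC);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	udelay(50);
+}
+#endif
+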
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
+/* display watermarks */
+#define	DC_LB_MEMORY_SPLIT				  0x6b0c
+#define	PRIORITY_A_CNT			                  0x6b18
+#define		PRIORITY_MARK_MASK			  0x7fff
+#define		PRIORITY_OFF				  (1 << 16)
+#define		PRIORITY_ALWAYS_ON			  (1 << 20)
+#define	PRIORITY_B_CNT			                  0x6b1c
+#define	PIPE0_ARBITRATION_CONTROL3			  0x0bf0
+#       define LATENCY_WATERMARK_MASK(x)                  ((x) << 16)
+#define	PIPE0_LATENCY_CONTROL			          0x0bf4
+#       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
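+
+/* Editor's illustrative sketch, not part of the original header: the IH
+ * ring size field is log2 of the ring size in dwords (see the comments
+ * above), so enabling a writeback-capable ring looks roughly like this,
+ * assuming WREG32() and order_base_2() from the kernel's log2.h:
+ */
+#if 0	/* example only */
+static inline void example_ih_rb_setup(struct radeon_device *rdev,
+				       u32 ring_size_bytes)
+{
+	u32 rb_bufsz = order_base_2(ring_size_bytes / 4);
+
+	WREG32(IH_RB_CNTL, IH_WPTR_OVERFLOW_ENABLE |
+	       IH_WPTR_WRITEBACK_ENABLE |
+	       IH_RB_FULL_DRAIN_ENABLE |
+	       IH_IB_SIZE(rb_bufsz));
+}
+#endif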
+
+#define CP_INT_CNTL                                     0xc124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define SCRATCH_INT_ENABLE                       (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define IB2_INT_ENABLE                           (1 << 29)
+#       define IB1_INT_ENABLE                           (1 << 30)
+#       define RB_INT_ENABLE                            (1 << 31)
+#define CP_INT_STATUS                                   0xc128
+#       define SCRATCH_INT_STAT                         (1 << 25)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define IB2_INT_STAT                             (1 << 29)
+#       define IB1_INT_STAT                             (1 << 30)
+#       define RB_INT_STAT                              (1 << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
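+
+/* Editor's illustrative sketch, not part of the original header: the six
+ * addresses in the comment above differ only by a per-CRTC offset, so
+ * per-instance access is usually done via an offset table (values derived
+ * directly from the comment):
+ */
+#if 0	/* example only */
+static const u32 example_crtc_offsets[6] = {
+	0x0, 0xc00, 0x9800, 0xa400, 0xb000, 0xbc00
+};
+/* frame count for crtc i:
+ *	RREG32(CRTC_STATUS_FRAME_COUNT + example_crtc_offsets[i]);
+ */
+#endif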
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DACB_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
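+
+/* Editor's illustrative sketch, not part of the original header: enabling
+ * one hot-plug-detect pin with connection and RX debounce timers; the
+ * timer values here are placeholders, not chip-validated numbers:
+ */
+#if 0	/* example only */
+static inline void example_hpd1_enable(struct radeon_device *rdev)
+{
+	WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) |
+	       DC_HPDx_RX_INT_TIMER(0xfa) |
+	       DC_HPDx_EN);
+}
+#endif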
+
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
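+
+/* Editor's illustrative sketch, not part of the original header: for a
+ * 6bpc panel the FMT block is typically set to either truncate or
+ * spatially dither down from the internal pipe depth; with the depth bits
+ * left clear, 6bpc output is selected (fmt_offset is a hypothetical
+ * per-instance offset):
+ */
+#if 0	/* example only */
+static inline void example_fmt_6bpc_dither(struct radeon_device *rdev,
+					   u32 fmt_offset)
+{
+	WREG32(FMT_BIT_DEPTH_CONTROL + fmt_offset,
+	       FMT_SPATIAL_DITHER_EN);
+}
+#endif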
+
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG  				  0xD0B8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) |    \
+                                    (((sub_cmd) & 0xFF) << 20) |\
+                                    (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE                        0x2
+#define	DMA_PACKET_COPY                         0x3
+#define	DMA_PACKET_INDIRECT_BUFFER              0x4
+#define	DMA_PACKET_SEMAPHORE                    0x5
+#define	DMA_PACKET_FENCE                        0x6
+#define	DMA_PACKET_TRAP                         0x7
+#define	DMA_PACKET_SRBM_WRITE                   0x9
+#define	DMA_PACKET_CONSTANT_FILL                0xd
+#define	DMA_PACKET_NOP                          0xf
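+
+/* Editor's illustrative sketch, not part of the original header:
+ * DMA_PACKET() packs a header that the GET_DMA_*() macros take apart
+ * again, e.g. for a write of ndw dwords (ndw must fit in the 20-bit
+ * count field):
+ */
+#if 0	/* example only */
+static inline u32 example_dma_write_header(u32 ndw)
+{
+	u32 header = DMA_PACKET(DMA_PACKET_WRITE, 0, ndw);
+
+	/* round-trips: GET_DMA_CMD(header) == DMA_PACKET_WRITE,
+	 * GET_DMA_SUB_CMD(header) == 0, GET_DMA_COUNT(header) == ndw
+	 */
+	return header;
+}
+#endif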
+
+/* PIF PHY0 indirect regs */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+/* PIF PHY1 indirect regs */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+/* PCIE PORT indirect regs */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     8
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG				0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xef54
+#define UVD_NO_OP					0xeffc
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+#define UVD_STATUS					0xf6bc
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
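+/* Illustrative usage (not driver code): the count field in a type-0 or
+ * type-3 header is "number of payload dwords - 1", so writing a single
+ * config register ("reg" and "val" are placeholders) takes a header
+ * plus two payload dwords:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ *	radeon_ring_write(ring, (reg - PACKET3_SET_CONFIG_REG_START) >> 2);
+ *	radeon_ring_write(ring, val);
+ *
+ * Type-2 packets (PACKET2) carry no payload and are used to pad the ring.
+ */
+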
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
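+/* Illustrative memory-to-memory copy using the layout above (modeled on
+ * the r600 CP DMA copy path; "src", "dst" and "size" are assumed local
+ * variables, with size in bytes):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *	radeon_ring_write(ring, lower_32_bits(src));
+ *	radeon_ring_write(ring, upper_32_bits(src) & 0xff);
+ *	radeon_ring_write(ring, lower_32_bits(dst));
+ *	radeon_ring_write(ring, upper_32_bits(dst) & 0xff);
+ *	radeon_ring_write(ring, size);
+ *
+ * PACKET3_CP_DMA_CP_SYNC can be OR'd into the third dword to make the CP
+ * wait for the transfer to complete before processing further packets.
+ */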
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_RB_OFFSET				0x4B
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE			0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE			0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT				0x75
+/* SET_APPEND_CNT - documentation
+ * 1. header
+ * 2. COMMAND
+ *  1:0 - SOURCE SEL
+ *  15:2 - Reserved
+ *  31:16 - WR_REG_OFFSET - context register to write source data to.
+ *          (one of R_02872C_GDS_APPEND_COUNT_0-11)
+ * 3. CONTROL
+ *  (for source == mem)
+ *  31:2 SRC_ADDRESS_LO
+ *  1:0 SWAP
+ *  (for source == GDS)
+ *  31:0 GDS offset
+ *  (for source == DATA)
+ *  31:0 DATA
+ *  (for source == REG)
+ *  31:0 REG
+ * 4. SRC_ADDRESS_HI[7:0]
+ * kernel driver 2.44 only supports SRC == MEM.
+ */
+#define PACKET3_SET_APPEND_CNT_SRC_SELECT(x) ((x) << 0)
+#define G_PACKET3_SET_APPEND_CNT_SRC_SELECT(x) (((x) & 0x3) >> 0)
+/* source is from the data in CONTROL */
+#define PACKET3_SAC_SRC_SEL_DATA 0x0
+/* source is from register */
+#define PACKET3_SAC_SRC_SEL_REG  0x1
+/* source is from GDS offset in CONTROL */
+#define PACKET3_SAC_SRC_SEL_GDS  0x2
+/* source is from memory address */
+#define PACKET3_SAC_SRC_SEL_MEM  0x3
+
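+/* Illustrative SET_APPEND_CNT packet with SRC == MEM, the only source the
+ * kernel accepts (see the note above); "src" is an assumed dword-aligned
+ * GPU address and "wr_reg_offset" the WR_REG_OFFSET field value:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_SET_APPEND_CNT, 2));
+ *	radeon_ring_write(ring,
+ *		PACKET3_SET_APPEND_CNT_SRC_SELECT(PACKET3_SAC_SRC_SEL_MEM) |
+ *		(wr_reg_offset << 16));
+ *	radeon_ring_write(ring, lower_32_bits(src));	(bits 1:0 hold SWAP)
+ *	radeon_ring_write(ring, upper_32_bits(src) & 0xff);
+ */
+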
+#define	SQ_RESOURCE_CONSTANT_WORD7_0				0x3001c
+#define		S__SQ_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
+
+#define VGT_VTX_VECT_EJECT_REG				0x88b0
+
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define SQ_ESGS_RING_BASE				0x8c40
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_BASE				0x8c48
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define VGT_TF_RING_SIZE				0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE				0x28900
+#define SQ_GSVS_RING_ITEMSIZE				0x28904
+#define SQ_ESTMP_RING_ITEMSIZE				0x28908
+#define SQ_GSTMP_RING_ITEMSIZE				0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE				0x28910
+#define SQ_PSTMP_RING_ITEMSIZE				0x28914
+#define SQ_LSTMP_RING_ITEMSIZE				0x28830
+#define SQ_HSTMP_RING_ITEMSIZE				0x28834
+
+#define SQ_GS_VERT_ITEMSIZE				0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1				0x28920
+#define SQ_GS_VERT_ITEMSIZE_2				0x28924
+#define SQ_GS_VERT_ITEMSIZE_3				0x28928
+#define SQ_GSVS_RING_OFFSET_1				0x2892c
+#define SQ_GSVS_RING_OFFSET_2				0x28930
+#define SQ_GSVS_RING_OFFSET_3				0x28934
+
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0			0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0			0x28f80
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0				0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1				0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2				0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3				0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4				0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5				0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6				0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7				0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8				0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9				0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10			0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11			0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12			0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13			0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14			0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15			0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0				0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1				0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2				0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3				0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4				0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5				0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6				0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7				0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8				0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9				0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10			0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11			0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12			0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13			0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14			0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15			0x28f7c
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define VGT_INDEX_TYPE                                  0x895C
+
+#define VGT_NUM_INDICES                                 0x8970
+
+#define VGT_COMPUTE_DIM_X                               0x8990
+#define VGT_COMPUTE_DIM_Y                               0x8994
+#define VGT_COMPUTE_DIM_Z                               0x8998
+#define VGT_COMPUTE_START_X                             0x899C
+#define VGT_COMPUTE_START_Y                             0x89A0
+#define VGT_COMPUTE_START_Z                             0x89A4
+#define VGT_COMPUTE_INDEX                               0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
+#define VGT_HS_OFFCHIP_PARAM                            0x89B0
+
+#define DB_DEBUG					0x9830
+#define DB_DEBUG2					0x9834
+#define DB_DEBUG3					0x9838
+#define DB_DEBUG4					0x983C
+#define DB_WATERMARKS					0x9854
+#define DB_DEPTH_CONTROL				0x28800
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define     V_028800_STENCILFUNC_NEVER                 0x00000000
+#define     V_028800_STENCILFUNC_LESS                  0x00000001
+#define     V_028800_STENCILFUNC_EQUAL                 0x00000002
+#define     V_028800_STENCILFUNC_LEQUAL                0x00000003
+#define     V_028800_STENCILFUNC_GREATER               0x00000004
+#define     V_028800_STENCILFUNC_NOTEQUAL              0x00000005
+#define     V_028800_STENCILFUNC_GEQUAL                0x00000006
+#define     V_028800_STENCILFUNC_ALWAYS                0x00000007
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define     V_028800_STENCIL_KEEP                      0x00000000
+#define     V_028800_STENCIL_ZERO                      0x00000001
+#define     V_028800_STENCIL_REPLACE                   0x00000002
+#define     V_028800_STENCIL_INCR                      0x00000003
+#define     V_028800_STENCIL_DECR                      0x00000004
+#define     V_028800_STENCIL_INVERT                    0x00000005
+#define     V_028800_STENCIL_INCR_WRAP                 0x00000006
+#define     V_028800_STENCIL_DECR_WRAP                 0x00000007
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
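+/* The S_/G_/C_/V_ helpers above follow the convention used throughout
+ * this header: S_<reg>_<field>(x) shifts a value into place, G_ extracts
+ * the field from a register value, C_ is the AND-mask that clears it and
+ * V_ names the legal values.  An illustrative (non-driver)
+ * read-modify-write of the STENCILFUNC field:
+ *
+ *	u32 tmp = RREG32(DB_DEPTH_CONTROL);
+ *	tmp &= C_028800_STENCILFUNC;
+ *	tmp |= S_028800_STENCILFUNC(V_028800_STENCILFUNC_ALWAYS);
+ *	WREG32(DB_DEPTH_CONTROL, tmp);
+ *
+ * In practice context registers such as this one are set through the
+ * command stream rather than by direct MMIO access.
+ */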
+#define DB_DEPTH_VIEW					0x28008
+#define R_028008_DB_DEPTH_VIEW                       0x00028008
+#define   S_028008_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028008_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028008_SLICE_START                         0xFFFFF800
+#define   S_028008_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028008_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028008_SLICE_MAX                           0xFF001FFF
+#define DB_HTILE_DATA_BASE				0x28014
+#define DB_HTILE_SURFACE				0x28abc
+#define   S_028ABC_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028ABC_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028ABC_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028ABC_HTILE_HEIGHT(x)                     (((x) & 0x1) << 1)
+#define   G_028ABC_HTILE_HEIGHT(x)                     (((x) >> 1) & 0x1)
+#define   C_028ABC_HTILE_HEIGHT                        0xFFFFFFFD
+#define   G_028ABC_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define DB_Z_INFO					0x28040
+#       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
+#       define DB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 24)
+#define R_028040_DB_Z_INFO                       0x028040
+#define   S_028040_FORMAT(x)                           (((x) & 0x3) << 0)
+#define   G_028040_FORMAT(x)                           (((x) >> 0) & 0x3)
+#define   C_028040_FORMAT                              0xFFFFFFFC
+#define     V_028040_Z_INVALID                     0x00000000
+#define     V_028040_Z_16                          0x00000001
+#define     V_028040_Z_24                          0x00000002
+#define     V_028040_Z_32_FLOAT                    0x00000003
+#define   S_028040_ARRAY_MODE(x)                       (((x) & 0xF) << 4)
+#define   G_028040_ARRAY_MODE(x)                       (((x) >> 4) & 0xF)
+#define   C_028040_ARRAY_MODE                          0xFFFFFF0F
+#define   S_028040_READ_SIZE(x)                        (((x) & 0x1) << 28)
+#define   G_028040_READ_SIZE(x)                        (((x) >> 28) & 0x1)
+#define   C_028040_READ_SIZE                           0xEFFFFFFF
+#define   S_028040_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 29)
+#define   G_028040_TILE_SURFACE_ENABLE(x)              (((x) >> 29) & 0x1)
+#define   C_028040_TILE_SURFACE_ENABLE                 0xDFFFFFFF
+#define   S_028040_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028040_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028040_ZRANGE_PRECISION                    0x7FFFFFFF
+#define   S_028040_TILE_SPLIT(x)                       (((x) & 0x7) << 8)
+#define   G_028040_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define   S_028040_NUM_BANKS(x)                        (((x) & 0x3) << 12)
+#define   G_028040_NUM_BANKS(x)                        (((x) >> 12) & 0x3)
+#define   S_028040_BANK_WIDTH(x)                       (((x) & 0x3) << 16)
+#define   G_028040_BANK_WIDTH(x)                       (((x) >> 16) & 0x3)
+#define   S_028040_BANK_HEIGHT(x)                      (((x) & 0x3) << 20)
+#define   G_028040_BANK_HEIGHT(x)                      (((x) >> 20) & 0x3)
+#define   S_028040_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 24)
+#define   G_028040_MACRO_TILE_ASPECT(x)                (((x) >> 24) & 0x3)
+#define DB_STENCIL_INFO					0x28044
+#define R_028044_DB_STENCIL_INFO                     0x028044
+#define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
+#define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
+#define   C_028044_FORMAT                              0xFFFFFFFE
+#define     V_028044_STENCIL_INVALID                   0
+#define     V_028044_STENCIL_8                         1
+#define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define DB_Z_READ_BASE					0x28048
+#define DB_STENCIL_READ_BASE				0x2804c
+#define DB_Z_WRITE_BASE					0x28050
+#define DB_STENCIL_WRITE_BASE				0x28054
+#define DB_DEPTH_SIZE					0x28058
+#define R_028058_DB_DEPTH_SIZE                       0x028058
+#define   S_028058_PITCH_TILE_MAX(x)                   (((x) & 0x7FF) << 0)
+#define   G_028058_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x7FF)
+#define   C_028058_PITCH_TILE_MAX                      0xFFFFF800
+#define   S_028058_HEIGHT_TILE_MAX(x)                   (((x) & 0x7FF) << 11)
+#define   G_028058_HEIGHT_TILE_MAX(x)                   (((x) >> 11) & 0x7FF)
+#define   C_028058_HEIGHT_TILE_MAX                      0xFFC007FF
+#define R_02805C_DB_DEPTH_SLICE                      0x02805C
+#define   S_02805C_SLICE_TILE_MAX(x)                   (((x) & 0x3FFFFF) << 0)
+#define   G_02805C_SLICE_TILE_MAX(x)                   (((x) >> 0) & 0x3FFFFF)
+#define   C_02805C_SLICE_TILE_MAX                      0xFFC00000
+
+#define SQ_PGM_START_PS					0x28840
+#define SQ_PGM_START_VS					0x2885c
+#define SQ_PGM_START_GS					0x28874
+#define SQ_PGM_START_ES					0x2888c
+#define SQ_PGM_START_FS					0x288a4
+#define SQ_PGM_START_HS					0x288b8
+#define SQ_PGM_START_LS					0x288d0
+
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+#define VGT_STRMOUT_CONFIG				0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG			0x28b98
+
+#define CB_TARGET_MASK					0x28238
+#define CB_SHADER_MASK					0x2823c
+
+#define GDS_ADDR_BASE					0x28720
+
+#define GDS_APPEND_COUNT_0				0x2872C
+#define GDS_APPEND_COUNT_1				0x28730
+#define GDS_APPEND_COUNT_2				0x28734
+#define GDS_APPEND_COUNT_3				0x28738
+#define GDS_APPEND_COUNT_4				0x2873C
+#define GDS_APPEND_COUNT_5				0x28740
+#define GDS_APPEND_COUNT_6				0x28744
+#define GDS_APPEND_COUNT_7				0x28748
+#define GDS_APPEND_COUNT_8				0x2874c
+#define GDS_APPEND_COUNT_9				0x28750
+#define GDS_APPEND_COUNT_10				0x28754
+#define GDS_APPEND_COUNT_11				0x28758
+
+#define	CB_IMMED0_BASE					0x28b9c
+#define	CB_IMMED1_BASE					0x28ba0
+#define	CB_IMMED2_BASE					0x28ba4
+#define	CB_IMMED3_BASE					0x28ba8
+#define	CB_IMMED4_BASE					0x28bac
+#define	CB_IMMED5_BASE					0x28bb0
+#define	CB_IMMED6_BASE					0x28bb4
+#define	CB_IMMED7_BASE					0x28bb8
+#define	CB_IMMED8_BASE					0x28bbc
+#define	CB_IMMED9_BASE					0x28bc0
+#define	CB_IMMED10_BASE					0x28bc4
+#define	CB_IMMED11_BASE					0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define	CB_COLOR0_BASE					0x28c60
+#define	CB_COLOR0_PITCH					0x28c64
+#define	CB_COLOR0_SLICE					0x28c68
+#define	CB_COLOR0_VIEW					0x28c6c
+#define R_028C6C_CB_COLOR0_VIEW                      0x00028C6C
+#define   S_028C6C_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028C6C_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028C6C_SLICE_START                         0xFFFFF800
+#define   S_028C6C_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028C6C_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028C6C_SLICE_MAX                           0xFF001FFF
+#define R_028C70_CB_COLOR0_INFO                      0x028C70
+#define   S_028C70_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_028C70_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_028C70_ENDIAN                              0xFFFFFFFC
+#define   S_028C70_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_028C70_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_028C70_FORMAT                              0xFFFFFF03
+#define     V_028C70_COLOR_INVALID                     0x00000000
+#define     V_028C70_COLOR_8                           0x00000001
+#define     V_028C70_COLOR_4_4                         0x00000002
+#define     V_028C70_COLOR_3_3_2                       0x00000003
+#define     V_028C70_COLOR_16                          0x00000005
+#define     V_028C70_COLOR_16_FLOAT                    0x00000006
+#define     V_028C70_COLOR_8_8                         0x00000007
+#define     V_028C70_COLOR_5_6_5                       0x00000008
+#define     V_028C70_COLOR_6_5_5                       0x00000009
+#define     V_028C70_COLOR_1_5_5_5                     0x0000000A
+#define     V_028C70_COLOR_4_4_4_4                     0x0000000B
+#define     V_028C70_COLOR_5_5_5_1                     0x0000000C
+#define     V_028C70_COLOR_32                          0x0000000D
+#define     V_028C70_COLOR_32_FLOAT                    0x0000000E
+#define     V_028C70_COLOR_16_16                       0x0000000F
+#define     V_028C70_COLOR_16_16_FLOAT                 0x00000010
+#define     V_028C70_COLOR_8_24                        0x00000011
+#define     V_028C70_COLOR_8_24_FLOAT                  0x00000012
+#define     V_028C70_COLOR_24_8                        0x00000013
+#define     V_028C70_COLOR_24_8_FLOAT                  0x00000014
+#define     V_028C70_COLOR_10_11_11                    0x00000015
+#define     V_028C70_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_028C70_COLOR_11_11_10                    0x00000017
+#define     V_028C70_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_028C70_COLOR_2_10_10_10                  0x00000019
+#define     V_028C70_COLOR_8_8_8_8                     0x0000001A
+#define     V_028C70_COLOR_10_10_10_2                  0x0000001B
+#define     V_028C70_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_028C70_COLOR_32_32                       0x0000001D
+#define     V_028C70_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_028C70_COLOR_16_16_16_16                 0x0000001F
+#define     V_028C70_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_028C70_COLOR_32_32_32_32                 0x00000022
+#define     V_028C70_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_028C70_COLOR_32_32_32_FLOAT              0x00000030
+#define   S_028C70_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_028C70_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_028C70_ARRAY_MODE                          0xFFFFF0FF
+#define     V_028C70_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_028C70_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_028C70_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028C70_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028C70_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_028C70_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_028C70_NUMBER_TYPE                         0xFFFF8FFF
+#define     V_028C70_NUMBER_UNORM                      0x00000000
+#define     V_028C70_NUMBER_SNORM                      0x00000001
+#define     V_028C70_NUMBER_USCALED                    0x00000002
+#define     V_028C70_NUMBER_SSCALED                    0x00000003
+#define     V_028C70_NUMBER_UINT                       0x00000004
+#define     V_028C70_NUMBER_SINT                       0x00000005
+#define     V_028C70_NUMBER_SRGB                       0x00000006
+#define     V_028C70_NUMBER_FLOAT                      0x00000007
+#define   S_028C70_COMP_SWAP(x)                        (((x) & 0x3) << 15)
+#define   G_028C70_COMP_SWAP(x)                        (((x) >> 15) & 0x3)
+#define   C_028C70_COMP_SWAP                           0xFFFE7FFF
+#define     V_028C70_SWAP_STD                          0x00000000
+#define     V_028C70_SWAP_ALT                          0x00000001
+#define     V_028C70_SWAP_STD_REV                      0x00000002
+#define     V_028C70_SWAP_ALT_REV                      0x00000003
+#define   S_028C70_FAST_CLEAR(x)                       (((x) & 0x1) << 17)
+#define   G_028C70_FAST_CLEAR(x)                       (((x) >> 17) & 0x1)
+#define   C_028C70_FAST_CLEAR                          0xFFFDFFFF
+#define   S_028C70_COMPRESSION(x)                      (((x) & 0x3) << 18)
+#define   G_028C70_COMPRESSION(x)                      (((x) >> 18) & 0x3)
+#define   C_028C70_COMPRESSION                         0xFFF3FFFF
+#define   S_028C70_BLEND_CLAMP(x)                      (((x) & 0x1) << 19)
+#define   G_028C70_BLEND_CLAMP(x)                      (((x) >> 19) & 0x1)
+#define   C_028C70_BLEND_CLAMP                         0xFFF7FFFF
+#define   S_028C70_BLEND_BYPASS(x)                     (((x) & 0x1) << 20)
+#define   G_028C70_BLEND_BYPASS(x)                     (((x) >> 20) & 0x1)
+#define   C_028C70_BLEND_BYPASS                        0xFFEFFFFF
+#define   S_028C70_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 21)
+#define   G_028C70_SIMPLE_FLOAT(x)                     (((x) >> 21) & 0x1)
+#define   C_028C70_SIMPLE_FLOAT                        0xFFDFFFFF
+#define   S_028C70_ROUND_MODE(x)                       (((x) & 0x1) << 22)
+#define   G_028C70_ROUND_MODE(x)                       (((x) >> 22) & 0x1)
+#define   C_028C70_ROUND_MODE                          0xFFBFFFFF
+#define   S_028C70_TILE_COMPACT(x)                     (((x) & 0x1) << 23)
+#define   G_028C70_TILE_COMPACT(x)                     (((x) >> 23) & 0x1)
+#define   C_028C70_TILE_COMPACT                        0xFF7FFFFF
+#define   S_028C70_SOURCE_FORMAT(x)                    (((x) & 0x3) << 24)
+#define   G_028C70_SOURCE_FORMAT(x)                    (((x) >> 24) & 0x3)
+#define   C_028C70_SOURCE_FORMAT                       0xFCFFFFFF
+#define     V_028C70_EXPORT_4C_32BPC                   0x0
+#define     V_028C70_EXPORT_4C_16BPC                   0x1
+#define     V_028C70_EXPORT_2C_32BPC                   0x2 /* Do not use */
+#define   S_028C70_RAT(x)                              (((x) & 0x1) << 26)
+#define   G_028C70_RAT(x)                              (((x) >> 26) & 0x1)
+#define   C_028C70_RAT                                 0xFBFFFFFF
+#define   S_028C70_RESOURCE_TYPE(x)                    (((x) & 0x7) << 27)
+#define   G_028C70_RESOURCE_TYPE(x)                    (((x) >> 27) & 0x7)
+#define   C_028C70_RESOURCE_TYPE                       0xC7FFFFFF
+
+#define	CB_COLOR0_INFO					0x28c70
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#       define ARRAY_LINEAR_GENERAL                     0
+#       define ARRAY_LINEAR_ALIGNED                     1
+#       define ARRAY_1D_TILED_THIN1                     2
+#       define ARRAY_2D_TILED_THIN1                     4
+#	define CB_SOURCE_FORMAT(x)			((x) << 24)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
+#define R_028C74_CB_COLOR0_ATTRIB                      0x028C74
+#define   S_028C74_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 4)
+#define   G_028C74_NON_DISP_TILING_ORDER(x)            (((x) >> 4) & 0x1)
+#define   C_028C74_NON_DISP_TILING_ORDER               0xFFFFFFEF
+#define   S_028C74_TILE_SPLIT(x)                       (((x) & 0xf) << 5)
+#define   G_028C74_TILE_SPLIT(x)                       (((x) >> 5) & 0xf)
+#define   S_028C74_NUM_BANKS(x)                        (((x) & 0x3) << 10)
+#define   G_028C74_NUM_BANKS(x)                        (((x) >> 10) & 0x3)
+#define   S_028C74_BANK_WIDTH(x)                       (((x) & 0x3) << 13)
+#define   G_028C74_BANK_WIDTH(x)                       (((x) >> 13) & 0x3)
+#define   S_028C74_BANK_HEIGHT(x)                      (((x) & 0x3) << 16)
+#define   G_028C74_BANK_HEIGHT(x)                      (((x) >> 16) & 0x3)
+#define   S_028C74_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 19)
+#define   G_028C74_MACRO_TILE_ASPECT(x)                (((x) >> 19) & 0x3)
+#define	CB_COLOR0_ATTRIB				0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
+#       define CB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 19)
+#define	CB_COLOR0_DIM					0x28c78
+/* only CB0-7 blocks have these regs */
+#define	CB_COLOR0_CMASK					0x28c7c
+#define	CB_COLOR0_CMASK_SLICE				0x28c80
+#define	CB_COLOR0_FMASK					0x28c84
+#define	CB_COLOR0_FMASK_SLICE				0x28c88
+#define	CB_COLOR0_CLEAR_WORD0				0x28c8c
+#define	CB_COLOR0_CLEAR_WORD1				0x28c90
+#define	CB_COLOR0_CLEAR_WORD2				0x28c94
+#define	CB_COLOR0_CLEAR_WORD3				0x28c98
+
+#define	CB_COLOR1_BASE					0x28c9c
+#define	CB_COLOR2_BASE					0x28cd8
+#define	CB_COLOR3_BASE					0x28d14
+#define	CB_COLOR4_BASE					0x28d50
+#define	CB_COLOR5_BASE					0x28d8c
+#define	CB_COLOR6_BASE					0x28dc8
+#define	CB_COLOR7_BASE					0x28e04
+#define	CB_COLOR8_BASE					0x28e40
+#define	CB_COLOR9_BASE					0x28e5c
+#define	CB_COLOR10_BASE					0x28e78
+#define	CB_COLOR11_BASE					0x28e94
+
+#define	CB_COLOR1_PITCH					0x28ca0
+#define	CB_COLOR2_PITCH					0x28cdc
+#define	CB_COLOR3_PITCH					0x28d18
+#define	CB_COLOR4_PITCH					0x28d54
+#define	CB_COLOR5_PITCH					0x28d90
+#define	CB_COLOR6_PITCH					0x28dcc
+#define	CB_COLOR7_PITCH					0x28e08
+#define	CB_COLOR8_PITCH					0x28e44
+#define	CB_COLOR9_PITCH					0x28e60
+#define	CB_COLOR10_PITCH				0x28e7c
+#define	CB_COLOR11_PITCH				0x28e98
+
+#define	CB_COLOR1_SLICE					0x28ca4
+#define	CB_COLOR2_SLICE					0x28ce0
+#define	CB_COLOR3_SLICE					0x28d1c
+#define	CB_COLOR4_SLICE					0x28d58
+#define	CB_COLOR5_SLICE					0x28d94
+#define	CB_COLOR6_SLICE					0x28dd0
+#define	CB_COLOR7_SLICE					0x28e0c
+#define	CB_COLOR8_SLICE					0x28e48
+#define	CB_COLOR9_SLICE					0x28e64
+#define	CB_COLOR10_SLICE				0x28e80
+#define	CB_COLOR11_SLICE				0x28e9c
+
+#define	CB_COLOR1_VIEW					0x28ca8
+#define	CB_COLOR2_VIEW					0x28ce4
+#define	CB_COLOR3_VIEW					0x28d20
+#define	CB_COLOR4_VIEW					0x28d5c
+#define	CB_COLOR5_VIEW					0x28d98
+#define	CB_COLOR6_VIEW					0x28dd4
+#define	CB_COLOR7_VIEW					0x28e10
+#define	CB_COLOR8_VIEW					0x28e4c
+#define	CB_COLOR9_VIEW					0x28e68
+#define	CB_COLOR10_VIEW					0x28e84
+#define	CB_COLOR11_VIEW					0x28ea0
+
+#define	CB_COLOR1_INFO					0x28cac
+#define	CB_COLOR2_INFO					0x28ce8
+#define	CB_COLOR3_INFO					0x28d24
+#define	CB_COLOR4_INFO					0x28d60
+#define	CB_COLOR5_INFO					0x28d9c
+#define	CB_COLOR6_INFO					0x28dd8
+#define	CB_COLOR7_INFO					0x28e14
+#define	CB_COLOR8_INFO					0x28e50
+#define	CB_COLOR9_INFO					0x28e6c
+#define	CB_COLOR10_INFO					0x28e88
+#define	CB_COLOR11_INFO					0x28ea4
+
+#define	CB_COLOR1_ATTRIB				0x28cb0
+#define	CB_COLOR2_ATTRIB				0x28cec
+#define	CB_COLOR3_ATTRIB				0x28d28
+#define	CB_COLOR4_ATTRIB				0x28d64
+#define	CB_COLOR5_ATTRIB				0x28da0
+#define	CB_COLOR6_ATTRIB				0x28ddc
+#define	CB_COLOR7_ATTRIB				0x28e18
+#define	CB_COLOR8_ATTRIB				0x28e54
+#define	CB_COLOR9_ATTRIB				0x28e70
+#define	CB_COLOR10_ATTRIB				0x28e8c
+#define	CB_COLOR11_ATTRIB				0x28ea8
+
+#define	CB_COLOR1_DIM					0x28cb4
+#define	CB_COLOR2_DIM					0x28cf0
+#define	CB_COLOR3_DIM					0x28d2c
+#define	CB_COLOR4_DIM					0x28d68
+#define	CB_COLOR5_DIM					0x28da4
+#define	CB_COLOR6_DIM					0x28de0
+#define	CB_COLOR7_DIM					0x28e1c
+#define	CB_COLOR8_DIM					0x28e58
+#define	CB_COLOR9_DIM					0x28e74
+#define	CB_COLOR10_DIM					0x28e90
+#define	CB_COLOR11_DIM					0x28eac
+
+#define	CB_COLOR1_CMASK					0x28cb8
+#define	CB_COLOR2_CMASK					0x28cf4
+#define	CB_COLOR3_CMASK					0x28d30
+#define	CB_COLOR4_CMASK					0x28d6c
+#define	CB_COLOR5_CMASK					0x28da8
+#define	CB_COLOR6_CMASK					0x28de4
+#define	CB_COLOR7_CMASK					0x28e20
+
+#define	CB_COLOR1_CMASK_SLICE				0x28cbc
+#define	CB_COLOR2_CMASK_SLICE				0x28cf8
+#define	CB_COLOR3_CMASK_SLICE				0x28d34
+#define	CB_COLOR4_CMASK_SLICE				0x28d70
+#define	CB_COLOR5_CMASK_SLICE				0x28dac
+#define	CB_COLOR6_CMASK_SLICE				0x28de8
+#define	CB_COLOR7_CMASK_SLICE				0x28e24
+
+#define	CB_COLOR1_FMASK					0x28cc0
+#define	CB_COLOR2_FMASK					0x28cfc
+#define	CB_COLOR3_FMASK					0x28d38
+#define	CB_COLOR4_FMASK					0x28d74
+#define	CB_COLOR5_FMASK					0x28db0
+#define	CB_COLOR6_FMASK					0x28dec
+#define	CB_COLOR7_FMASK					0x28e28
+
+#define	CB_COLOR1_FMASK_SLICE				0x28cc4
+#define	CB_COLOR2_FMASK_SLICE				0x28d00
+#define	CB_COLOR3_FMASK_SLICE				0x28d3c
+#define	CB_COLOR4_FMASK_SLICE				0x28d78
+#define	CB_COLOR5_FMASK_SLICE				0x28db4
+#define	CB_COLOR6_FMASK_SLICE				0x28df0
+#define	CB_COLOR7_FMASK_SLICE				0x28e2c
+
+#define	CB_COLOR1_CLEAR_WORD0				0x28cc8
+#define	CB_COLOR2_CLEAR_WORD0				0x28d04
+#define	CB_COLOR3_CLEAR_WORD0				0x28d40
+#define	CB_COLOR4_CLEAR_WORD0				0x28d7c
+#define	CB_COLOR5_CLEAR_WORD0				0x28db8
+#define	CB_COLOR6_CLEAR_WORD0				0x28df4
+#define	CB_COLOR7_CLEAR_WORD0				0x28e30
+
+#define	CB_COLOR1_CLEAR_WORD1				0x28ccc
+#define	CB_COLOR2_CLEAR_WORD1				0x28d08
+#define	CB_COLOR3_CLEAR_WORD1				0x28d44
+#define	CB_COLOR4_CLEAR_WORD1				0x28d80
+#define	CB_COLOR5_CLEAR_WORD1				0x28dbc
+#define	CB_COLOR6_CLEAR_WORD1				0x28df8
+#define	CB_COLOR7_CLEAR_WORD1				0x28e34
+
+#define	CB_COLOR1_CLEAR_WORD2				0x28cd0
+#define	CB_COLOR2_CLEAR_WORD2				0x28d0c
+#define	CB_COLOR3_CLEAR_WORD2				0x28d48
+#define	CB_COLOR4_CLEAR_WORD2				0x28d84
+#define	CB_COLOR5_CLEAR_WORD2				0x28dc0
+#define	CB_COLOR6_CLEAR_WORD2				0x28dfc
+#define	CB_COLOR7_CLEAR_WORD2				0x28e38
+
+#define	CB_COLOR1_CLEAR_WORD3				0x28cd4
+#define	CB_COLOR2_CLEAR_WORD3				0x28d10
+#define	CB_COLOR3_CLEAR_WORD3				0x28d4c
+#define	CB_COLOR4_CLEAR_WORD3				0x28d88
+#define	CB_COLOR5_CLEAR_WORD3				0x28dc4
+#define	CB_COLOR6_CLEAR_WORD3				0x28e00
+#define	CB_COLOR7_CLEAR_WORD3				0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0                         0x30000
+#	define TEX_DIM(x)				((x) << 0)
+#	define SQ_TEX_DIM_1D				0
+#	define SQ_TEX_DIM_2D				1
+#	define SQ_TEX_DIM_3D				2
+#	define SQ_TEX_DIM_CUBEMAP			3
+#	define SQ_TEX_DIM_1D_ARRAY			4
+#	define SQ_TEX_DIM_2D_ARRAY			5
+#	define SQ_TEX_DIM_2D_MSAA			6
+#	define SQ_TEX_DIM_2D_ARRAY_MSAA			7
+#define SQ_TEX_RESOURCE_WORD1_0                         0x30004
+#       define TEX_ARRAY_MODE(x)                        ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0                         0x30008
+#define SQ_TEX_RESOURCE_WORD3_0                         0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0                         0x30010
+#	define TEX_DST_SEL_X(x)				((x) << 16)
+#	define TEX_DST_SEL_Y(x)				((x) << 19)
+#	define TEX_DST_SEL_Z(x)				((x) << 22)
+#	define TEX_DST_SEL_W(x)				((x) << 25)
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
+#define SQ_TEX_RESOURCE_WORD5_0                         0x30014
+#define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
+#define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define MACRO_TILE_ASPECT(x)                     (((x) & 0x3) << 6)
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
+#define R_030000_SQ_TEX_RESOURCE_WORD0_0             0x030000
+#define   S_030000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_030000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_030000_DIM                                 0xFFFFFFF8
+#define     V_030000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_030000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_030000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_030000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_030000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_030000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_030000_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 5)
+#define   G_030000_NON_DISP_TILING_ORDER(x)            (((x) >> 5) & 0x1)
+#define   C_030000_NON_DISP_TILING_ORDER               0xFFFFFFDF
+#define   S_030000_PITCH(x)                            (((x) & 0xFFF) << 6)
+#define   G_030000_PITCH(x)                            (((x) >> 6) & 0xFFF)
+#define   C_030000_PITCH                               0xFFFC003F
+#define   S_030000_TEX_WIDTH(x)                        (((x) & 0x3FFF) << 18)
+#define   G_030000_TEX_WIDTH(x)                        (((x) >> 18) & 0x3FFF)
+#define   C_030000_TEX_WIDTH                           0x0003FFFF
+#define R_030004_SQ_TEX_RESOURCE_WORD1_0             0x030004
+#define   S_030004_TEX_HEIGHT(x)                       (((x) & 0x3FFF) << 0)
+#define   G_030004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x3FFF)
+#define   C_030004_TEX_HEIGHT                          0xFFFFC000
+#define   S_030004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 14)
+#define   G_030004_TEX_DEPTH(x)                        (((x) >> 14) & 0x1FFF)
+#define   C_030004_TEX_DEPTH                           0xF8003FFF
+#define   S_030004_ARRAY_MODE(x)                       (((x) & 0xF) << 28)
+#define   G_030004_ARRAY_MODE(x)                       (((x) >> 28) & 0xF)
+#define   C_030004_ARRAY_MODE                          0x0FFFFFFF
+#define R_030008_SQ_TEX_RESOURCE_WORD2_0             0x030008
+#define   S_030008_BASE_ADDRESS(x)                     (((x) & 0xFFFFFFFF) << 0)
+#define   G_030008_BASE_ADDRESS(x)                     (((x) >> 0) & 0xFFFFFFFF)
+#define   C_030008_BASE_ADDRESS                        0x00000000
+#define R_03000C_SQ_TEX_RESOURCE_WORD3_0             0x03000C
+#define   S_03000C_MIP_ADDRESS(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_03000C_MIP_ADDRESS(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_03000C_MIP_ADDRESS                         0x00000000
+#define R_030010_SQ_TEX_RESOURCE_WORD4_0             0x030010
+#define   S_030010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_030010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_030010_FORMAT_COMP_X                       0xFFFFFFFC
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED           0x00000000
+#define     V_030010_SQ_FORMAT_COMP_SIGNED             0x00000001
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED    0x00000002
+#define   S_030010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_030010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_030010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_030010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_030010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_030010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_030010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_030010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_030010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_030010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_030010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_030010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define     V_030010_SQ_NUM_FORMAT_NORM                0x00000000
+#define     V_030010_SQ_NUM_FORMAT_INT                 0x00000001
+#define     V_030010_SQ_NUM_FORMAT_SCALED              0x00000002
+#define   S_030010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_030010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_030010_SRF_MODE_ALL                        0xFFFFFBFF
+#define     V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE     0x00000000
+#define     V_030010_SRF_MODE_NO_ZERO                  0x00000001
+#define   S_030010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_030010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_030010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_030010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_030010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_030010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_030010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_030010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_030010_DST_SEL_X                           0xFFF8FFFF
+#define     V_030010_SQ_SEL_X                          0x00000000
+#define     V_030010_SQ_SEL_Y                          0x00000001
+#define     V_030010_SQ_SEL_Z                          0x00000002
+#define     V_030010_SQ_SEL_W                          0x00000003
+#define     V_030010_SQ_SEL_0                          0x00000004
+#define     V_030010_SQ_SEL_1                          0x00000005
+#define   S_030010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_030010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_030010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_030010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_030010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_030010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_030010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_030010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_030010_DST_SEL_W                           0xF1FFFFFF
+#define   S_030010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_030010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_030010_BASE_LEVEL                          0x0FFFFFFF
+#define R_030014_SQ_TEX_RESOURCE_WORD5_0             0x030014
+#define   S_030014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_030014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_030014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_030014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_030014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_030014_BASE_ARRAY                          0xFFFE000F
+#define   S_030014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_030014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_030014_LAST_ARRAY                          0xC001FFFF
+#define R_030018_SQ_TEX_RESOURCE_WORD6_0             0x030018
+#define   S_030018_MAX_ANISO(x)                        (((x) & 0x7) << 0)
+#define   G_030018_MAX_ANISO(x)                        (((x) >> 0) & 0x7)
+#define   C_030018_MAX_ANISO                           0xFFFFFFF8
+#define   S_030018_PERF_MODULATION(x)                  (((x) & 0x7) << 3)
+#define   G_030018_PERF_MODULATION(x)                  (((x) >> 3) & 0x7)
+#define   C_030018_PERF_MODULATION                     0xFFFFFFC7
+#define   S_030018_INTERLACED(x)                       (((x) & 0x1) << 6)
+#define   G_030018_INTERLACED(x)                       (((x) >> 6) & 0x1)
+#define   C_030018_INTERLACED                          0xFFFFFFBF
+#define   S_030018_TILE_SPLIT(x)                       (((x) & 0x7) << 29)
+#define   G_030018_TILE_SPLIT(x)                       (((x) >> 29) & 0x7)
+#define R_03001C_SQ_TEX_RESOURCE_WORD7_0             0x03001C
+#define   S_03001C_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 6)
+#define   G_03001C_MACRO_TILE_ASPECT(x)                (((x) >> 6) & 0x3)
+#define   S_03001C_BANK_WIDTH(x)                       (((x) & 0x3) << 8)
+#define   G_03001C_BANK_WIDTH(x)                       (((x) >> 8) & 0x3)
+#define   S_03001C_BANK_HEIGHT(x)                      (((x) & 0x3) << 10)
+#define   G_03001C_BANK_HEIGHT(x)                      (((x) >> 10) & 0x3)
+#define   S_03001C_NUM_BANKS(x)                        (((x) & 0x3) << 16)
+#define   G_03001C_NUM_BANKS(x)                        (((x) >> 16) & 0x3)
+#define   S_03001C_TYPE(x)                             (((x) & 0x3) << 30)
+#define   G_03001C_TYPE(x)                             (((x) >> 30) & 0x3)
+#define   C_03001C_TYPE                                0x3FFFFFFF
+#define     V_03001C_SQ_TEX_VTX_INVALID_TEXTURE        0x00000000
+#define     V_03001C_SQ_TEX_VTX_INVALID_BUFFER         0x00000001
+#define     V_03001C_SQ_TEX_VTX_VALID_TEXTURE          0x00000002
+#define     V_03001C_SQ_TEX_VTX_VALID_BUFFER           0x00000003
+#define   S_03001C_DATA_FORMAT(x)                      (((x) & 0x3F) << 0)
+#define   G_03001C_DATA_FORMAT(x)                      (((x) >> 0) & 0x3F)
+#define   C_03001C_DATA_FORMAT                         0xFFFFFFC0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000C
+#	define SQ_VTCX_SEL_X(x)				((x) << 3)
+#	define SQ_VTCX_SEL_Y(x)				((x) << 6)
+#	define SQ_VTCX_SEL_Z(x)				((x) << 9)
+#	define SQ_VTCX_SEL_W(x)				((x) << 12)
+#define SQ_VTX_CONSTANT_WORD4_0				0x30010
+#define SQ_VTX_CONSTANT_WORD5_0                         0x30014
+#define SQ_VTX_CONSTANT_WORD6_0                         0x30018
+#define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
+
+#define TD_PS_BORDER_COLOR_INDEX                        0xA400
+#define TD_PS_BORDER_COLOR_RED                          0xA404
+#define TD_PS_BORDER_COLOR_GREEN                        0xA408
+#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
+#define TD_VS_BORDER_COLOR_INDEX                        0xA414
+#define TD_VS_BORDER_COLOR_RED                          0xA418
+#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
+#define TD_VS_BORDER_COLOR_BLUE                         0xA420
+#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
+#define TD_GS_BORDER_COLOR_INDEX                        0xA428
+#define TD_GS_BORDER_COLOR_RED                          0xA42C
+#define TD_GS_BORDER_COLOR_GREEN                        0xA430
+#define TD_GS_BORDER_COLOR_BLUE                         0xA434
+#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
+#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
+#define TD_HS_BORDER_COLOR_RED                          0xA440
+#define TD_HS_BORDER_COLOR_GREEN                        0xA444
+#define TD_HS_BORDER_COLOR_BLUE                         0xA448
+#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
+#define TD_LS_BORDER_COLOR_INDEX                        0xA450
+#define TD_LS_BORDER_COLOR_RED                          0xA454
+#define TD_LS_BORDER_COLOR_GREEN                        0xA458
+#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
+#define TD_CS_BORDER_COLOR_INDEX                        0xA464
+#define TD_CS_BORDER_COLOR_RED                          0xA468
+#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
+#define TD_CS_BORDER_COLOR_BLUE                         0xA470
+#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
+
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS			0x8E48
+#define CAYMAN_DB_EQAA					0x28804
+#define CAYMAN_DB_DEPTH_INFO				0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG				0x28BE0
+#define         CAYMAN_MSAA_NUM_SAMPLES_SHIFT           0
+#define         CAYMAN_MSAA_NUM_SAMPLES_MASK            0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE			0x28358
+/* cayman packet3 addition */
+#define	CAYMAN_PACKET3_DEALLOC_STATE			0x14
+
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
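+/* DMA_RB_SIZE takes log2 of the ring size in dwords.  An illustrative
+ * (simplified) setup for a ring of "ring_size" bytes:
+ *
+ *	u32 rb_bufsz = order_base_2(ring_size / 4);
+ *	WREG32(DMA_RB_CNTL, DMA_RB_ENABLE | DMA_RB_SIZE(rb_bufsz));
+ *
+ * The real resume paths also program the read/write pointers and
+ * writeback before setting DMA_RB_ENABLE.
+ */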
+
+#endif
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 0000000..f055d6e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2899 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "kv_dpm.h"
+#include "radeon_asic.h"
+#include <linux/seq_file.h>
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
+#define KV_MINIMUM_ENGINE_CLOCK         800
+#define SMC_RAM_END                     0x40000
+
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+			    bool enable);
+static void kv_init_graphics_levels(struct radeon_device *rdev);
+static int kv_calculate_ds_divider(struct radeon_device *rdev);
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
+static int kv_calculate_dpm_settings(struct radeon_device *rdev);
+static void kv_enable_new_levels(struct radeon_device *rdev);
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+					   struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
+static int kv_set_enabled_levels(struct radeon_device *rdev);
+static int kv_force_dpm_highest(struct radeon_device *rdev);
+static int kv_force_dpm_lowest(struct radeon_device *rdev);
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+					struct radeon_ps *new_rps,
+					struct radeon_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+					    int min_temp, int max_temp);
+static int kv_init_fps_limits(struct radeon_device *rdev);
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
+
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+			  u32 block, bool enable);
+
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{  1,       4,        1    },
+	{  2,       5,        1    },
+	{  3,       4,        2    },
+	{  4,       1,        1    },
+	{  5,       5,        2    },
+	{  6,       6,        1    },
+	{  7,       9,        2    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+	{  0,       4,        1    },
+	{  1,       4,        1    },
+	{  2,       5,        1    },
+	{  3,       4,        1    },
+	{  4,       1,        1    },
+	{  5,       5,        1    },
+	{  6,       6,        1    },
+	{  7,       9,        1    },
+	{  8,       4,        1    },
+	{  9,       2,        1    },
+	{  10,      3,        1    },
+	{  11,      6,        1    },
+	{  12,      8,        2    },
+	{  13,      1,        1    },
+	{  14,      2,        1    },
+	{  15,      3,        1    },
+	{  16,      1,        1    },
+	{  17,      4,        1    },
+	{  18,      3,        1    },
+	{  19,      1,        1    },
+	{  20,      8,        1    },
+	{  21,      5,        1    },
+	{  22,      1,        1    },
+	{  23,      1,        1    },
+	{  24,      4,        1    },
+	{  27,      6,        1    },
+	{  28,      1,        1    },
+	{ 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
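+/*
+ * DIDT config table: { offset, mask, shift, value, type }, consumed by
+ * kv_program_pt_config_registers().  Offsets here are DIDT-indirect
+ * register indices; the table is terminated by an offset of 0xFFFFFFFF.
+ */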
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+	{ 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
+{
+	struct kv_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct radeon_device *rdev,
+				       const struct kv_lcac_config_values *local_cac_table,
+				       const struct kv_lcac_config_reg *local_cac_reg)
+{
+	u32 i, count, data;
+	const struct kv_lcac_config_values *values = local_cac_table;
+
+	while (values->block_id != 0xffffffff) {
+		count = values->signal_id;
+		for (i = 0; i < count; i++) {
+			data = ((values->block_id << local_cac_reg->block_shift) &
+				local_cac_reg->block_mask);
+			data |= ((i << local_cac_reg->signal_shift) &
+				 local_cac_reg->signal_mask);
+			data |= ((values->t << local_cac_reg->t_shift) &
+				 local_cac_reg->t_mask);
+			data |= ((1 << local_cac_reg->enable_shift) &
+				 local_cac_reg->enable_mask);
+			WREG32_SMC(local_cac_reg->cntl, data);
+		}
+		values++;
+	}
+}
+#endif
+
+static int kv_program_pt_config_registers(struct radeon_device *rdev,
+					  const struct kv_pt_config_reg *cac_config_regs)
+{
+	const struct kv_pt_config_reg *config_regs = cac_config_regs;
+	u32 data;
+	u32 cache = 0;
+
+	if (config_regs == NULL)
+		return -EINVAL;
+
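+	/*
+	 * KV_CONFIGREG_CACHE entries only accumulate field bits; the
+	 * accumulated value is OR'd into the next non-cache register
+	 * write and the accumulator is then cleared.
+	 */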
+	while (config_regs->offset != 0xFFFFFFFF) {
+		if (config_regs->type == KV_CONFIGREG_CACHE) {
+			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+		} else {
+			switch (config_regs->type) {
+			case KV_CONFIGREG_SMC_IND:
+				data = RREG32_SMC(config_regs->offset);
+				break;
+			case KV_CONFIGREG_DIDT_IND:
+				data = RREG32_DIDT(config_regs->offset);
+				break;
+			default:
+				data = RREG32(config_regs->offset << 2);
+				break;
+			}
+
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			data |= cache;
+			cache = 0;
+
+			switch (config_regs->type) {
+			case KV_CONFIGREG_SMC_IND:
+				WREG32_SMC(config_regs->offset, data);
+				break;
+			case KV_CONFIGREG_DIDT_IND:
+				WREG32_DIDT(config_regs->offset, data);
+				break;
+			default:
+				WREG32(config_regs->offset << 2, data);
+				break;
+			}
+		}
+		config_regs++;
+	}
+
+	return 0;
+}
+
+static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 data;
+
+	if (pi->caps_sq_ramping) {
+		data = RREG32_DIDT(DIDT_SQ_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_SQ_CTRL0, data);
+	}
+
+	if (pi->caps_db_ramping) {
+		data = RREG32_DIDT(DIDT_DB_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_DB_CTRL0, data);
+	}
+
+	if (pi->caps_td_ramping) {
+		data = RREG32_DIDT(DIDT_TD_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_TD_CTRL0, data);
+	}
+
+	if (pi->caps_tcp_ramping) {
+		data = RREG32_DIDT(DIDT_TCP_CTRL0);
+		if (enable)
+			data |= DIDT_CTRL_EN;
+		else
+			data &= ~DIDT_CTRL_EN;
+		WREG32_DIDT(DIDT_TCP_CTRL0, data);
+	}
+}
+
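+/* program and toggle DIDT while the RLC is held in safe mode */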
+static int kv_enable_didt(struct radeon_device *rdev, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	if (pi->caps_sq_ramping ||
+	    pi->caps_db_ramping ||
+	    pi->caps_td_ramping ||
+	    pi->caps_tcp_ramping) {
+		cik_enter_rlc_safe_mode(rdev);
+
+		if (enable) {
+			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
+			if (ret) {
+				cik_exit_rlc_safe_mode(rdev);
+				return ret;
+			}
+		}
+
+		kv_do_enable_didt(rdev, enable);
+
+		cik_exit_rlc_safe_mode(rdev);
+	}
+
+	return 0;
+}
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->caps_cac) {
+		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
+		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
+		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
+		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
+		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
+		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
+		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
+		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+	}
+}
+#endif
+
+static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret = 0;
+
+	if (pi->caps_cac) {
+		if (enable) {
+			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
+			if (ret)
+				pi->cac_enabled = false;
+			else
+				pi->cac_enabled = true;
+		} else if (pi->cac_enabled) {
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
+			pi->cac_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+static int kv_process_firmware_header(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, DpmTable),
+				     &tmp, pi->sram_end);
+
+	if (ret == 0)
+		pi->dpm_table_start = tmp;
+
+	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, SoftRegisters),
+				     &tmp, pi->sram_end);
+
+	if (ret == 0)
+		pi->soft_regs_start = tmp;
+
+	return ret;
+}
+
+static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	pi->graphics_voltage_change_enable = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+				   &pi->graphics_voltage_change_enable,
+				   sizeof(u8), pi->sram_end);
+
+	return ret;
+}
+
+static int kv_set_dpm_interval(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	pi->graphics_interval = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+				   &pi->graphics_interval,
+				   sizeof(u8), pi->sram_end);
+
+	return ret;
+}
+
+static int kv_set_dpm_boot_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+				   &pi->graphics_boot_level,
+				   sizeof(u8), pi->sram_end);
+
+	return ret;
+}
+
+static void kv_program_vc(struct radeon_device *rdev)
+{
+	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
+}
+
+static void kv_clear_vc(struct radeon_device *rdev)
+{
+	WREG32_SMC(CG_FTV_0, 0);
+}
+
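+/* the SMU consumes big-endian tables, hence the cpu_to_be*() swaps below */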
+static int kv_set_divider_value(struct radeon_device *rdev,
+				u32 index, u32 sclk)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk, false, &dividers);
+	if (ret)
+		return ret;
+
+	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+	return 0;
+}
+
+static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
+				   struct sumo_vid_mapping_table *vid_mapping_table,
+				   u32 vid_2bit)
+{
+	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 i;
+
+	if (vddc_sclk_table && vddc_sclk_table->count) {
+		if (vid_2bit < vddc_sclk_table->count)
+			return vddc_sclk_table->entries[vid_2bit].v;
+		else
+			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+	} else {
+		for (i = 0; i < vid_mapping_table->num_entries; i++) {
+			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+				return vid_mapping_table->entries[i].vid_7bit;
+		}
+		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+	}
+}
+
+static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
+				   struct sumo_vid_mapping_table *vid_mapping_table,
+				   u32 vid_7bit)
+{
+	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 i;
+
+	if (vddc_sclk_table && vddc_sclk_table->count) {
+		for (i = 0; i < vddc_sclk_table->count; i++) {
+			if (vddc_sclk_table->entries[i].v == vid_7bit)
+				return i;
+		}
+		return vddc_sclk_table->count - 1;
+	} else {
+		for (i = 0; i < vid_mapping_table->num_entries; i++) {
+			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+				return vid_mapping_table->entries[i].vid_2bit;
+		}
+
+		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+	}
+}
+
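+/*
+ * Convert an 8-bit VID to a voltage; this appears to match the standard
+ * SVI encoding V = 1.55V - vid * 6.25mV, expressed in 0.25mV units
+ * (6200 * 0.25mV = 1.55V, 25 * 0.25mV = 6.25mV).
+ */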
+static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
+					    u16 voltage)
+{
+	return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
+					    u32 vid_2bit)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
+					       &pi->sys_info.vid_mapping_table,
+					       vid_2bit);
+
+	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
+}
+
+static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+	pi->graphics_level[index].MinVddNb =
+		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
+
+	return 0;
+}
+
+static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+	return 0;
+}
+
+static void kv_dpm_power_level_enable(struct radeon_device *rdev,
+				      u32 index, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+	tmp |= GLOBAL_PWRMGT_EN;
+	WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+	kv_smc_dpm_enable(rdev, true);
+}
+
+static void kv_stop_dpm(struct radeon_device *rdev)
+{
+	kv_smc_dpm_enable(rdev, false);
+}
+
+static void kv_start_am(struct radeon_device *rdev)
+{
+	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
+
+	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct radeon_device *rdev)
+{
+	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+
+	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
+{
+	return kv_notify_message_to_smu(rdev, freeze ?
+					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct radeon_device *rdev)
+{
+	return kv_force_dpm_lowest(rdev);
+}
+
+static int kv_unforce_levels(struct radeon_device *rdev)
+{
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+	else
+		return kv_set_enabled_levels(rdev);
+}
+
+static int kv_update_sclk_t(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 low_sclk_interrupt_t = 0;
+	int ret = 0;
+
+	if (pi->caps_sclk_throttle_low_notification) {
+		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+					   (u8 *)&low_sclk_interrupt_t,
+					   sizeof(u32), pi->sram_end);
+	}
+	return ret;
+}
+
+static int kv_program_bootup_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	if (table && table->count) {
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].clk == pi->boot_pl.sclk)
+				break;
+		}
+
+		pi->graphics_boot_level = (u8)i;
+		kv_dpm_power_level_enable(rdev, i, true);
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+
+		if (table->num_max_dpm_entries == 0)
+			return -EINVAL;
+
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
+				break;
+		}
+
+		pi->graphics_boot_level = (u8)i;
+		kv_dpm_power_level_enable(rdev, i, true);
+	}
+	return 0;
+}
+
+static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	pi->graphics_therm_throttle_enable = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+				   &pi->graphics_therm_throttle_enable,
+				   sizeof(u8), pi->sram_end);
+
+	return ret;
+}
+
+static int kv_upload_dpm_settings(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+				   (u8 *)&pi->graphics_level,
+				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+				   pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
+				   &pi->graphics_dpm_level_count,
+				   sizeof(u8), pi->sram_end);
+
+	return ret;
+}
+
+static u32 kv_get_clock_difference(u32 a, u32 b)
+{
+	return (a >= b) ? a - b : b - a;
+}
+
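+/*
+ * Pick a DFS bypass divider select for clocks near 400/300/200/150/100 MHz
+ * (table clocks are presumably in 10 kHz units, matched within 2 MHz).
+ */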
+static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 value;
+
+	if (pi->caps_enable_dfs_bypass) {
+		if (kv_get_clock_difference(clk, 40000) < 200)
+			value = 3;
+		else if (kv_get_clock_difference(clk, 30000) < 200)
+			value = 2;
+		else if (kv_get_clock_difference(clk, 20000) < 200)
+			value = 7;
+		else if (kv_get_clock_difference(clk, 15000) < 200)
+			value = 6;
+		else if (kv_get_clock_difference(clk, 10000) < 200)
+			value = 8;
+		else
+			value = 0;
+	} else {
+		value = 0;
+	}
+
+	return value;
+}
+
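+/*
+ * Build the SMU UVD level table from the vclk/dclk/voltage dependency
+ * table, skipping entries above the high-voltage threshold, then upload
+ * the level count, sampling interval and levels to SMC SRAM.
+ */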
+static int kv_populate_uvd_table(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_uvd_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct atom_clock_dividers dividers;
+	int ret;
+	u32 i;
+
+	if (table == NULL || table->count == 0)
+		return 0;
+
+	pi->uvd_level_count = 0;
+	for (i = 0; i < table->count; i++) {
+		if (pi->high_voltage_t &&
+		    (pi->high_voltage_t < table->entries[i].v))
+			break;
+
+		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
+		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
+		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
+
+		pi->uvd_level[i].VClkBypassCntl =
+			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
+		pi->uvd_level[i].DClkBypassCntl =
+			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     table->entries[i].vclk, false, &dividers);
+		if (ret)
+			return ret;
+		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     table->entries[i].dclk, false, &dividers);
+		if (ret)
+			return ret;
+		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
+
+		pi->uvd_level_count++;
+	}
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
+				   (u8 *)&pi->uvd_level_count,
+				   sizeof(u8), pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->uvd_interval = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
+				   &pi->uvd_interval,
+				   sizeof(u8), pi->sram_end);
+	if (ret)
+		return ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
+				   (u8 *)&pi->uvd_level,
+				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
+				   pi->sram_end);
+
+	return ret;
+}
+
+static int kv_populate_vce_table(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+	u32 i;
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	struct atom_clock_dividers dividers;
+
+	if (table == NULL || table->count == 0)
+		return 0;
+
+	pi->vce_level_count = 0;
+	for (i = 0; i < table->count; i++) {
+		if (pi->high_voltage_t &&
+		    pi->high_voltage_t < table->entries[i].v)
+			break;
+
+		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
+		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+		pi->vce_level[i].ClkBypassCntl =
+			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     table->entries[i].evclk, false, &dividers);
+		if (ret)
+			return ret;
+		pi->vce_level[i].Divider = (u8)dividers.post_div;
+
+		pi->vce_level_count++;
+	}
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
+				   (u8 *)&pi->vce_level_count,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->vce_interval = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
+				   (u8 *)&pi->vce_interval,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
+				   (u8 *)&pi->vce_level,
+				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
+				   pi->sram_end);
+
+	return ret;
+}
+
+static int kv_populate_samu_table(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+	struct atom_clock_dividers dividers;
+	int ret;
+	u32 i;
+
+	if (table == NULL || table->count == 0)
+		return 0;
+
+	pi->samu_level_count = 0;
+	for (i = 0; i < table->count; i++) {
+		if (pi->high_voltage_t &&
+		    pi->high_voltage_t < table->entries[i].v)
+			break;
+
+		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+		pi->samu_level[i].ClkBypassCntl =
+			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     table->entries[i].clk, false, &dividers);
+		if (ret)
+			return ret;
+		pi->samu_level[i].Divider = (u8)dividers.post_div;
+
+		pi->samu_level_count++;
+	}
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
+				   (u8 *)&pi->samu_level_count,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->samu_interval = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
+				   (u8 *)&pi->samu_interval,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
+				   (u8 *)&pi->samu_level,
+				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
+				   pi->sram_end);
+
+	return ret;
+}
+
+static int kv_populate_acp_table(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+	struct atom_clock_dividers dividers;
+	int ret;
+	u32 i;
+
+	if (table == NULL || table->count == 0)
+		return 0;
+
+	pi->acp_level_count = 0;
+	for (i = 0; i < table->count; i++) {
+		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     table->entries[i].clk, false, &dividers);
+		if (ret)
+			return ret;
+		pi->acp_level[i].Divider = (u8)dividers.post_div;
+
+		pi->acp_level_count++;
+	}
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
+				   (u8 *)&pi->acp_level_count,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->acp_interval = 1;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
+				   (u8 *)&pi->acp_interval,
+				   sizeof(u8),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	ret = kv_copy_bytes_to_smc(rdev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
+				   (u8 *)&pi->acp_level,
+				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
+				   pi->sram_end);
+
+	return ret;
+}
+
+static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	if (table && table->count) {
+		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+			if (pi->caps_enable_dfs_bypass) {
+				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 3;
+				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 2;
+				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 7;
+				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 6;
+				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 8;
+				else
+					pi->graphics_level[i].ClkBypassCntl = 0;
+			} else {
+				pi->graphics_level[i].ClkBypassCntl = 0;
+			}
+		}
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+			if (pi->caps_enable_dfs_bypass) {
+				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 3;
+				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 2;
+				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 7;
+				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 6;
+				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 8;
+				else
+					pi->graphics_level[i].ClkBypassCntl = 0;
+			} else {
+				pi->graphics_level[i].ClkBypassCntl = 0;
+			}
+		}
+	}
+}
+
+static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+	return kv_notify_message_to_smu(rdev, enable ?
+					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
+}
+
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->acp_boot_level = 0xff;
+}
+
+static void kv_update_current_ps(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct kv_ps *new_ps = kv_get_ps(rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->current_rps = *rps;
+	pi->current_ps = *new_ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void kv_update_requested_ps(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
+{
+	struct kv_ps *new_ps = kv_get_ps(rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->requested_rps = *rps;
+	pi->requested_ps = *new_ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	if (pi->bapm_enable) {
+		ret = kv_smc_bapm_enable(rdev, enable);
+		if (ret)
+			DRM_ERROR("kv_smc_bapm_enable failed\n");
+	}
+}
+
+static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
+{
+	u32 thermal_int;
+
+	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
+	if (enable)
+		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+	else
+		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
+	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+}
+
+int kv_dpm_enable(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	ret = kv_process_firmware_header(rdev);
+	if (ret) {
+		DRM_ERROR("kv_process_firmware_header failed\n");
+		return ret;
+	}
+	kv_init_fps_limits(rdev);
+	kv_init_graphics_levels(rdev);
+	ret = kv_program_bootup_state(rdev);
+	if (ret) {
+		DRM_ERROR("kv_program_bootup_state failed\n");
+		return ret;
+	}
+	kv_calculate_dfs_bypass_settings(rdev);
+	ret = kv_upload_dpm_settings(rdev);
+	if (ret) {
+		DRM_ERROR("kv_upload_dpm_settings failed\n");
+		return ret;
+	}
+	ret = kv_populate_uvd_table(rdev);
+	if (ret) {
+		DRM_ERROR("kv_populate_uvd_table failed\n");
+		return ret;
+	}
+	ret = kv_populate_vce_table(rdev);
+	if (ret) {
+		DRM_ERROR("kv_populate_vce_table failed\n");
+		return ret;
+	}
+	ret = kv_populate_samu_table(rdev);
+	if (ret) {
+		DRM_ERROR("kv_populate_samu_table failed\n");
+		return ret;
+	}
+	ret = kv_populate_acp_table(rdev);
+	if (ret) {
+		DRM_ERROR("kv_populate_acp_table failed\n");
+		return ret;
+	}
+	kv_program_vc(rdev);
+#if 0
+	kv_initialize_hardware_cac_manager(rdev);
+#endif
+	kv_start_am(rdev);
+	if (pi->enable_auto_thermal_throttling) {
+		ret = kv_enable_auto_thermal_throttling(rdev);
+		if (ret) {
+			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+			return ret;
+		}
+	}
+	ret = kv_enable_dpm_voltage_scaling(rdev);
+	if (ret) {
+		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+		return ret;
+	}
+	ret = kv_set_dpm_interval(rdev);
+	if (ret) {
+		DRM_ERROR("kv_set_dpm_interval failed\n");
+		return ret;
+	}
+	ret = kv_set_dpm_boot_state(rdev);
+	if (ret) {
+		DRM_ERROR("kv_set_dpm_boot_state failed\n");
+		return ret;
+	}
+	ret = kv_enable_ulv(rdev, true);
+	if (ret) {
+		DRM_ERROR("kv_enable_ulv failed\n");
+		return ret;
+	}
+	kv_start_dpm(rdev);
+	ret = kv_enable_didt(rdev, true);
+	if (ret) {
+		DRM_ERROR("kv_enable_didt failed\n");
+		return ret;
+	}
+	ret = kv_enable_smc_cac(rdev, true);
+	if (ret) {
+		DRM_ERROR("kv_enable_smc_cac failed\n");
+		return ret;
+	}
+
+	kv_reset_acp_boot_level(rdev);
+
+	ret = kv_smc_bapm_enable(rdev, false);
+	if (ret) {
+		DRM_ERROR("kv_smc_bapm_enable failed\n");
+		return ret;
+	}
+
+	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return ret;
+}
+
+int kv_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret = 0;
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret) {
+			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+			return ret;
+		}
+		kv_enable_thermal_int(rdev, true);
+	}
+
+	/* powerdown unused blocks for now */
+	kv_dpm_powergate_acp(rdev, true);
+	kv_dpm_powergate_samu(rdev, true);
+	kv_dpm_powergate_vce(rdev, true);
+	kv_dpm_powergate_uvd(rdev, true);
+
+	return ret;
+}
+
+void kv_dpm_disable(struct radeon_device *rdev)
+{
+	kv_smc_bapm_enable(rdev, false);
+
+	if (rdev->family == CHIP_MULLINS)
+		kv_enable_nb_dpm(rdev, false);
+
+	/* powerup blocks */
+	kv_dpm_powergate_acp(rdev, false);
+	kv_dpm_powergate_samu(rdev, false);
+	kv_dpm_powergate_vce(rdev, false);
+	kv_dpm_powergate_uvd(rdev, false);
+
+	kv_enable_smc_cac(rdev, false);
+	kv_enable_didt(rdev, false);
+	kv_clear_vc(rdev);
+	kv_stop_dpm(rdev);
+	kv_enable_ulv(rdev, false);
+	kv_reset_am(rdev);
+	kv_enable_thermal_int(rdev, false);
+
+	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+#if 0
+static int kv_write_smc_soft_register(struct radeon_device *rdev,
+				      u16 reg_offset, u32 value)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
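+	/* XXX: 'value' is a u32 but only sizeof(u16) bytes are copied;
+	 * this looks like it should be sizeof(u32) (unused, under #if 0).
+	 */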
+	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
+				    (u8 *)&value, sizeof(u16), pi->sram_end);
+}
+
+static int kv_read_smc_soft_register(struct radeon_device *rdev,
+				     u16 reg_offset, u32 *value)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
+				      value, pi->sram_end);
+}
+#endif
+
+static void kv_init_sclk_t(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->low_sclk_interrupt_t = 0;
+}
+
+static int kv_init_fps_limits(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret = 0;
+
+	if (pi->caps_fps) {
+		u16 tmp;
+
+		tmp = 45;
+		pi->fps_high_t = cpu_to_be16(tmp);
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
+					   (u8 *)&pi->fps_high_t,
+					   sizeof(u16), pi->sram_end);
+		if (ret)
+			return ret;
+
+		tmp = 30;
+		pi->fps_low_t = cpu_to_be16(tmp);
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
+					   (u8 *)&pi->fps_low_t,
+					   sizeof(u16), pi->sram_end);
+
+	}
+	return ret;
+}
+
+static void kv_init_powergate_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->uvd_power_gated = false;
+	pi->vce_power_gated = false;
+	pi->samu_power_gated = false;
+	pi->acp_power_gated = false;
+}
+
+static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+	return kv_notify_message_to_smu(rdev, enable ?
+					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+	return kv_notify_message_to_smu(rdev, enable ?
+					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+	return kv_notify_message_to_smu(rdev, enable ?
+					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+	return kv_notify_message_to_smu(rdev, enable ?
+					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
+}
+
+static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_uvd_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	int ret;
+	u32 mask;
+
+	if (!gate) {
+		if (table->count)
+			pi->uvd_boot_level = table->count - 1;
+		else
+			pi->uvd_boot_level = 0;
+
+		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+			mask = 1 << pi->uvd_boot_level;
+		} else {
+			mask = 0x1f;
+		}
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
+					   (u8 *)&pi->uvd_boot_level,
+					   sizeof(u8), pi->sram_end);
+		if (ret)
+			return ret;
+
+		kv_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_UVDDPM_SetEnabledMask,
+						  mask);
+	}
+
+	return kv_enable_uvd_dpm(rdev, !gate);
+}
+
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
+{
+	u8 i;
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].evclk >= evclk)
+			break;
+	}
+
+	return i;
+}
+
+static int kv_update_vce_dpm(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_new_state,
+			     struct radeon_ps *radeon_current_state)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	int ret;
+
+	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+		kv_dpm_powergate_vce(rdev, false);
+		/* turn the clocks on when encoding */
+		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
+		if (pi->caps_stable_p_state)
+			pi->vce_boot_level = table->count - 1;
+		else
+			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
+					   (u8 *)&pi->vce_boot_level,
+					   sizeof(u8),
+					   pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (pi->caps_stable_p_state)
+			kv_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_VCEDPM_SetEnabledMask,
+							  (1 << pi->vce_boot_level));
+
+		kv_enable_vce_dpm(rdev, true);
+	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
+		kv_enable_vce_dpm(rdev, false);
+		/* turn the clocks off when not encoding */
+		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+		kv_dpm_powergate_vce(rdev, true);
+	}
+
+	return 0;
+}
+
+static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+	int ret;
+
+	if (!gate) {
+		if (pi->caps_stable_p_state)
+			pi->samu_boot_level = table->count - 1;
+		else
+			pi->samu_boot_level = 0;
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
+					   (u8 *)&pi->samu_boot_level,
+					   sizeof(u8),
+					   pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (pi->caps_stable_p_state)
+			kv_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
+							  (1 << pi->samu_boot_level));
+	}
+
+	return kv_enable_samu_dpm(rdev, !gate);
+}
+
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+	u8 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].clk >= 0) /* XXX: clk is u32, so this is always true and level 0 is returned */
+			break;
+	}
+
+	if (i >= table->count)
+		i = table->count - 1;
+
+	return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u8 acp_boot_level;
+
+	if (!pi->caps_stable_p_state) {
+		acp_boot_level = kv_get_acp_boot_level(rdev);
+		if (acp_boot_level != pi->acp_boot_level) {
+			pi->acp_boot_level = acp_boot_level;
+			kv_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_ACPDPM_SetEnabledMask,
+							  (1 << pi->acp_boot_level));
+		}
+	}
+}
+
+static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+	int ret;
+
+	if (!gate) {
+		if (pi->caps_stable_p_state)
+			pi->acp_boot_level = table->count - 1;
+		else
+			pi->acp_boot_level = kv_get_acp_boot_level(rdev);
+
+		ret = kv_copy_bytes_to_smc(rdev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
+					   (u8 *)&pi->acp_boot_level,
+					   sizeof(u8),
+					   pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (pi->caps_stable_p_state)
+			kv_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_ACPDPM_SetEnabledMask,
+							  (1 << pi->acp_boot_level));
+	}
+
+	return kv_enable_acp_dpm(rdev, !gate);
+}
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->uvd_power_gated == gate)
+		return;
+
+	pi->uvd_power_gated = gate;
+
+	if (gate) {
+		if (pi->caps_uvd_pg) {
+			uvd_v1_0_stop(rdev);
+			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
+		}
+		kv_update_uvd_dpm(rdev, gate);
+		if (pi->caps_uvd_pg)
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
+	} else {
+		if (pi->caps_uvd_pg) {
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
+			uvd_v4_2_resume(rdev);
+			uvd_v1_0_start(rdev);
+			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+		}
+		kv_update_uvd_dpm(rdev, gate);
+	}
+}
+
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->vce_power_gated == gate)
+		return;
+
+	pi->vce_power_gated = gate;
+
+	if (gate) {
+		if (pi->caps_vce_pg) {
+			/* XXX do we need a vce_v1_0_stop() ?  */
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+		}
+	} else {
+		if (pi->caps_vce_pg) {
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+			vce_v2_0_resume(rdev);
+			vce_v1_0_start(rdev);
+		}
+	}
+}
+
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->samu_power_gated == gate)
+		return;
+
+	pi->samu_power_gated = gate;
+
+	if (gate) {
+		kv_update_samu_dpm(rdev, true);
+		if (pi->caps_samu_pg)
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
+	} else {
+		if (pi->caps_samu_pg)
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
+		kv_update_samu_dpm(rdev, false);
+	}
+}
+
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->acp_power_gated == gate)
+		return;
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+		return;
+
+	pi->acp_power_gated = gate;
+
+	if (gate) {
+		kv_update_acp_dpm(rdev, true);
+		if (pi->caps_acp_pg)
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
+	} else {
+		if (pi->caps_acp_pg)
+			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
+		kv_update_acp_dpm(rdev, false);
+	}
+}
+
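+/*
+ * Clamp the usable DPM level window [lowest_valid, highest_valid] to the
+ * sclk range of the new power state; if the window inverts, collapse it
+ * to whichever endpoint is closer to the requested clocks.
+ */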
+static void kv_set_valid_clock_range(struct radeon_device *rdev,
+				     struct radeon_ps *new_rps)
+{
+	struct kv_ps *new_ps = kv_get_ps(new_rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	if (table && table->count) {
+		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
+			    (i == (pi->graphics_dpm_level_count - 1))) {
+				pi->lowest_valid = i;
+				break;
+			}
+		}
+
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
+				break;
+		}
+		pi->highest_valid = i;
+
+		if (pi->lowest_valid > pi->highest_valid) {
+			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
+			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
+				pi->highest_valid = pi->lowest_valid;
+			else
+				pi->lowest_valid = pi->highest_valid;
+		}
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+
+		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
+			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
+			    i == (int)(pi->graphics_dpm_level_count - 1)) {
+				pi->lowest_valid = i;
+				break;
+			}
+		}
+
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].sclk_frequency <=
+			    new_ps->levels[new_ps->num_levels - 1].sclk)
+				break;
+		}
+		pi->highest_valid = i;
+
+		if (pi->lowest_valid > pi->highest_valid) {
+			if ((new_ps->levels[0].sclk -
+			     table->entries[pi->highest_valid].sclk_frequency) >
+			    (table->entries[pi->lowest_valid].sclk_frequency -
+			     new_ps->levels[new_ps->num_levels - 1].sclk))
+				pi->highest_valid = pi->lowest_valid;
+			else
+				pi->lowest_valid = pi->highest_valid;
+		}
+	}
+}
+
+static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
+					 struct radeon_ps *new_rps)
+{
+	struct kv_ps *new_ps = kv_get_ps(new_rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret = 0;
+	u8 clk_bypass_cntl;
+
+	if (pi->caps_enable_dfs_bypass) {
+		clk_bypass_cntl = new_ps->need_dfs_bypass ?
+			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
+		ret = kv_copy_bytes_to_smc(rdev,
+					   (pi->dpm_table_start +
+					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
+					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
+					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
+					   &clk_bypass_cntl,
+					   sizeof(u8), pi->sram_end);
+	}
+
+	return ret;
+}
+
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+			    bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret = 0;
+
+	if (enable) {
+		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+			if (ret == 0)
+				pi->nb_dpm_enabled = true;
+		}
+	} else {
+		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+			if (ret == 0)
+				pi->nb_dpm_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level)
+{
+	int ret;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		ret = kv_force_dpm_highest(rdev);
+		if (ret)
+			return ret;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		ret = kv_force_dpm_lowest(rdev);
+		if (ret)
+			return ret;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+		ret = kv_unforce_levels(rdev);
+		if (ret)
+			return ret;
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	kv_update_requested_ps(rdev, new_ps);
+
+	kv_apply_state_adjust_rules(rdev,
+				    &pi->requested_rps,
+				    &pi->current_rps);
+
+	return 0;
+}
+
+int kv_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+	struct radeon_ps *old_ps = &pi->current_rps;
+	int ret;
+
+	if (pi->bapm_enable) {
+		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+		if (ret) {
+			DRM_ERROR("kv_smc_bapm_enable failed\n");
+			return ret;
+		}
+	}
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
+		if (pi->enable_dpm) {
+			kv_set_valid_clock_range(rdev, new_ps);
+			kv_update_dfs_bypass_settings(rdev, new_ps);
+			ret = kv_calculate_ds_divider(rdev);
+			if (ret) {
+				DRM_ERROR("kv_calculate_ds_divider failed\n");
+				return ret;
+			}
+			kv_calculate_nbps_level_settings(rdev);
+			kv_calculate_dpm_settings(rdev);
+			kv_force_lowest_valid(rdev);
+			kv_enable_new_levels(rdev);
+			kv_upload_dpm_settings(rdev);
+			kv_program_nbps_index_settings(rdev, new_ps);
+			kv_unforce_levels(rdev);
+			kv_set_enabled_levels(rdev);
+			kv_force_lowest_valid(rdev);
+			kv_unforce_levels(rdev);
+
+			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+			if (ret) {
+				DRM_ERROR("kv_update_vce_dpm failed\n");
+				return ret;
+			}
+			kv_update_sclk_t(rdev);
+			if (rdev->family == CHIP_MULLINS)
+				kv_enable_nb_dpm(rdev, true);
+		}
+	} else {
+		if (pi->enable_dpm) {
+			kv_set_valid_clock_range(rdev, new_ps);
+			kv_update_dfs_bypass_settings(rdev, new_ps);
+			ret = kv_calculate_ds_divider(rdev);
+			if (ret) {
+				DRM_ERROR("kv_calculate_ds_divider failed\n");
+				return ret;
+			}
+			kv_calculate_nbps_level_settings(rdev);
+			kv_calculate_dpm_settings(rdev);
+			kv_freeze_sclk_dpm(rdev, true);
+			kv_upload_dpm_settings(rdev);
+			kv_program_nbps_index_settings(rdev, new_ps);
+			kv_freeze_sclk_dpm(rdev, false);
+			kv_set_enabled_levels(rdev);
+			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+			if (ret) {
+				DRM_ERROR("kv_update_vce_dpm failed\n");
+				return ret;
+			}
+			kv_update_acp_boot_level(rdev);
+			kv_update_sclk_t(rdev);
+			kv_enable_nb_dpm(rdev, true);
+		}
+	}
+
+	return 0;
+}
+
+void kv_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+
+	kv_update_current_ps(rdev, new_ps);
+}
+
+void kv_dpm_setup_asic(struct radeon_device *rdev)
+{
+	sumo_take_smu_control(rdev, true);
+	kv_init_powergate_state(rdev);
+	kv_init_sclk_t(rdev);
+}
+
+#if 0
+void kv_dpm_reset_asic(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
+		kv_force_lowest_valid(rdev);
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_upload_dpm_settings(rdev);
+		kv_force_lowest_valid(rdev);
+		kv_unforce_levels(rdev);
+	} else {
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_freeze_sclk_dpm(rdev, true);
+		kv_upload_dpm_settings(rdev);
+		kv_freeze_sclk_dpm(rdev, false);
+		kv_set_enabled_level(rdev, pi->graphics_boot_level);
+	}
+}
+#endif
+
+/* XXX use sumo_dpm_display_configuration_changed */
+
+static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
+						struct radeon_clock_and_voltage_limits *table)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
+		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
+		table->sclk =
+			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
+		table->vddc =
+			kv_convert_2bit_index_to_voltage(rdev,
+							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
+	}
+
+	table->mclk = pi->sys_info.nbp_memory_clock[0];
+}
+
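+/*
+ * Convert the 8-bit VID indices stored in the ATOM dependency tables
+ * into voltage values in place.
+ */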
+static void kv_patch_voltage_values(struct radeon_device *rdev)
+{
+	int i;
+	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
+		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct radeon_vce_clock_voltage_dependency_table *vce_table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table *samu_table =
+		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table *acp_table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+	if (uvd_table->count) {
+		for (i = 0; i < uvd_table->count; i++)
+			uvd_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 uvd_table->entries[i].v);
+	}
+
+	if (vce_table->count) {
+		for (i = 0; i < vce_table->count; i++)
+			vce_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 vce_table->entries[i].v);
+	}
+
+	if (samu_table->count) {
+		for (i = 0; i < samu_table->count; i++)
+			samu_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 samu_table->entries[i].v);
+	}
+
+	if (acp_table->count) {
+		for (i = 0; i < acp_table->count; i++)
+			acp_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 acp_table->entries[i].v);
+	}
+}
+
+static void kv_construct_boot_state(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+	pi->boot_pl.ds_divider_index = 0;
+	pi->boot_pl.ss_divider_index = 0;
+	pi->boot_pl.allow_gnb_slow = 1;
+	pi->boot_pl.force_nbp_state = 0;
+	pi->boot_pl.display_wm = 0;
+	pi->boot_pl.vce_wm = 0;
+}
+
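+/* Force the highest (or, below, the lowest) enabled DPM level found in
+ * the SMU's enable mask.  Kabini/Mullins take an explicit ForceState
+ * message; other parts are pinned by programming a one-hot enabled-level
+ * mask instead. */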
+static int kv_force_dpm_highest(struct radeon_device *rdev)
+{
+	int ret;
+	u32 enable_mask, i;
+
+	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+	if (ret)
+		return ret;
+
+	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
+		if (enable_mask & (1 << i))
+			break;
+	}
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
+}
+
+static int kv_force_dpm_lowest(struct radeon_device *rdev)
+{
+	int ret;
+	u32 enable_mask, i;
+
+	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+		if (enable_mask & (1 << i))
+			break;
+	}
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
+}
+
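+/* Pick the largest deep-sleep divider whose divided engine clock still
+ * stays at or above the minimum.  Returns 0 (deep sleep disabled) when
+ * the clock is already below the floor or sclk deep sleep is not
+ * supported. */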
+static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+					     u32 sclk, u32 min_sclk_in_sr)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	u32 temp;
+	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
+		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+
+	if (sclk < min)
+		return 0;
+
+	if (!pi->caps_sclk_ds)
+		return 0;
+
+	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
+		temp = sclk / sumo_get_sleep_divider_from_id(i);
+		if (temp >= min)
+			break;
+	}
+
+	return (u8)i;
+}
+
+static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	int i;
+
+	if (table && table->count) {
+		for (i = table->count - 1; i >= 0; i--) {
+			if (pi->high_voltage_t &&
+			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
+			     pi->high_voltage_t)) {
+				*limit = i;
+				return 0;
+			}
+		}
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+
+		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
+			if (pi->high_voltage_t &&
+			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
+			     pi->high_voltage_t)) {
+				*limit = i;
+				return 0;
+			}
+		}
+	}
+
+	*limit = 0;
+	return 0;
+}
+
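+/* Clamp the requested state to what the platform can actually run:
+ * raise per-level sclks to the minimum (or to the stable-p-state/VCE
+ * floor), pull back levels that would exceed the high-voltage threshold,
+ * and choose NB P-state masks based on family, display count and
+ * battery state. */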
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+					struct radeon_ps *new_rps,
+					struct radeon_ps *old_rps)
+{
+	struct kv_ps *ps = kv_get_ps(new_rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 min_sclk = 10000; /* ??? */
+	u32 sclk, mclk = 0;
+	int i, limit;
+	bool force_high;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 stable_p_state_sclk = 0;
+	struct radeon_clock_and_voltage_limits *max_limits =
+		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	if (new_rps->vce_active) {
+		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+	} else {
+		new_rps->evclk = 0;
+		new_rps->ecclk = 0;
+	}
+
+	mclk = max_limits->mclk;
+	sclk = min_sclk;
+
+	if (pi->caps_stable_p_state) {
+		stable_p_state_sclk = (max_limits->sclk * 75) / 100;
+
+		for (i = table->count - 1; i >= 0; i--) {
+			if (stable_p_state_sclk >= table->entries[i].clk) {
+				stable_p_state_sclk = table->entries[i].clk;
+				break;
+			}
+		}
+
+		/* no entry at or below the 75% target was found, so
+		 * fall back to the lowest entry */
+		if (i < 0)
+			stable_p_state_sclk = table->entries[0].clk;
+
+		sclk = stable_p_state_sclk;
+	}
+
+	if (new_rps->vce_active) {
+		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+	}
+
+	ps->need_dfs_bypass = true;
+
+	for (i = 0; i < ps->num_levels; i++) {
+		if (ps->levels[i].sclk < sclk)
+			ps->levels[i].sclk = sclk;
+	}
+
+	if (table && table->count) {
+		for (i = 0; i < ps->num_levels; i++) {
+			if (pi->high_voltage_t &&
+			    (pi->high_voltage_t <
+			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+				kv_get_high_voltage_limit(rdev, &limit);
+				ps->levels[i].sclk = table->entries[limit].clk;
+			}
+		}
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+
+		for (i = 0; i < ps->num_levels; i++) {
+			if (pi->high_voltage_t &&
+			    (pi->high_voltage_t <
+			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+				kv_get_high_voltage_limit(rdev, &limit);
+				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
+			}
+		}
+	}
+
+	if (pi->caps_stable_p_state) {
+		for (i = 0; i < ps->num_levels; i++) {
+			ps->levels[i].sclk = stable_p_state_sclk;
+		}
+	}
+
+	pi->video_start = new_rps->dclk || new_rps->vclk ||
+		new_rps->evclk || new_rps->ecclk;
+
+	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+		pi->battery_state = true;
+	else
+		pi->battery_state = false;
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
+		ps->dpm0_pg_nb_ps_lo = 0x1;
+		ps->dpm0_pg_nb_ps_hi = 0x0;
+		ps->dpmx_nb_ps_lo = 0x1;
+		ps->dpmx_nb_ps_hi = 0x0;
+	} else {
+		ps->dpm0_pg_nb_ps_lo = 0x3;
+		ps->dpm0_pg_nb_ps_hi = 0x0;
+		ps->dpmx_nb_ps_lo = 0x3;
+		ps->dpmx_nb_ps_hi = 0x0;
+
+		if (pi->sys_info.nb_dpm_enable) {
+			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
+				pi->disable_nb_ps3_in_battery;
+			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+			ps->dpm0_pg_nb_ps_hi = 0x2;
+			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
+			ps->dpmx_nb_ps_hi = 0x2;
+		}
+	}
+}
+
+static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
+						    u32 index, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
+}
+
+static int kv_calculate_ds_divider(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 sclk_in_sr = 10000; /* ??? */
+	u32 i;
+
+	if (pi->lowest_valid > pi->highest_valid)
+		return -EINVAL;
+
+	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+		pi->graphics_level[i].DeepSleepDivId =
+			kv_get_sleep_divider_id_from_clock(rdev,
+							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
+							   sclk_in_sr);
+	}
+	return 0;
+}
+
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	bool force_high;
+	struct radeon_clock_and_voltage_limits *max_limits =
+		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	u32 mclk = max_limits->mclk;
+
+	if (pi->lowest_valid > pi->highest_valid)
+		return -EINVAL;
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
+		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+			pi->graphics_level[i].GnbSlow = 1;
+			pi->graphics_level[i].ForceNbPs1 = 0;
+			pi->graphics_level[i].UpH = 0;
+		}
+
+		if (!pi->sys_info.nb_dpm_enable)
+			return 0;
+
+		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+
+		if (force_high) {
+			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+				pi->graphics_level[i].GnbSlow = 0;
+		} else {
+			if (pi->battery_state)
+				pi->graphics_level[0].ForceNbPs1 = 1;
+
+			pi->graphics_level[1].GnbSlow = 0;
+			pi->graphics_level[2].GnbSlow = 0;
+			pi->graphics_level[3].GnbSlow = 0;
+			pi->graphics_level[4].GnbSlow = 0;
+		}
+	} else {
+		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+			pi->graphics_level[i].GnbSlow = 1;
+			pi->graphics_level[i].ForceNbPs1 = 0;
+			pi->graphics_level[i].UpH = 0;
+		}
+
+		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
+			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
+			if (pi->lowest_valid != pi->highest_valid)
+				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
+		}
+	}
+	return 0;
+}
+
+static int kv_calculate_dpm_settings(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+
+	if (pi->lowest_valid > pi->highest_valid)
+		return -EINVAL;
+
+	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
+
+	return 0;
+}
+
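+/* Build the SMU graphics level table, preferring the BIOS vddc/sclk
+ * dependency table and falling back to the sclk/voltage mapping table.
+ * All levels start disabled; kv_enable_new_levels() turns on the valid
+ * range later. */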
+static void kv_init_graphics_levels(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	if (table && table->count) {
+		u32 vid_2bit;
+
+		pi->graphics_dpm_level_count = 0;
+		for (i = 0; i < table->count; i++) {
+			if (pi->high_voltage_t &&
+			    (pi->high_voltage_t <
+			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
+				break;
+
+			kv_set_divider_value(rdev, i, table->entries[i].clk);
+			vid_2bit = kv_convert_vid7_to_vid2(rdev,
+							   &pi->sys_info.vid_mapping_table,
+							   table->entries[i].v);
+			kv_set_vid(rdev, i, vid_2bit);
+			kv_set_at(rdev, i, pi->at[i]);
+			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+			pi->graphics_dpm_level_count++;
+		}
+	} else {
+		struct sumo_sclk_voltage_mapping_table *table =
+			&pi->sys_info.sclk_voltage_mapping_table;
+
+		pi->graphics_dpm_level_count = 0;
+		for (i = 0; i < table->num_max_dpm_entries; i++) {
+			if (pi->high_voltage_t &&
+			    pi->high_voltage_t <
+			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
+				break;
+
+			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
+			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
+			kv_set_at(rdev, i, pi->at[i]);
+			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+			pi->graphics_dpm_level_count++;
+		}
+	}
+
+	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+		kv_dpm_power_level_enable(rdev, i, false);
+}
+
+static void kv_enable_new_levels(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+		if (i >= pi->lowest_valid && i <= pi->highest_valid)
+			kv_dpm_power_level_enable(rdev, i, true);
+	}
+}
+
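+/* Tell the SMU which sclk DPM levels may be used: either a single level
+ * as a one-hot mask (e.g. level 3 -> 0x8), or every level between
+ * lowest_valid and highest_valid inclusive. */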
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+	u32 new_mask = (1 << level);
+
+	return kv_send_msg_to_smc_with_parameter(rdev,
+						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
+						 new_mask);
+}
+
+static int kv_set_enabled_levels(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 i, new_mask = 0;
+
+	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+		new_mask |= (1 << i);
+
+	return kv_send_msg_to_smc_with_parameter(rdev,
+						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
+						 new_mask);
+}
+
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+					   struct radeon_ps *new_rps)
+{
+	struct kv_ps *new_ps = kv_get_ps(new_rps);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 nbdpmconfig1;
+
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+		return;
+
+	if (pi->sys_info.nb_dpm_enable) {
+		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
+		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
+				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
+		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
+				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
+				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
+				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
+		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
+	}
+}
+
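+/* Program the SMC thermal interrupt thresholds.  Temperatures are
+ * handled in millidegrees C and the register fields appear to encode
+ * degrees with a fixed +49 offset, so e.g. a 90000/120000 range programs
+ * DIG_THERM_INTL=139 and DIG_THERM_INTH=169. */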
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+	u32 tmp;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
+	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
+	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
+		DIG_THERM_INTL(49 + (low_temp / 1000)));
+	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static int kv_parse_sys_info_table(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+					      data_offset);
+
+		if (crev != 8) {
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			return -EINVAL;
+		}
+		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
+		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
+		pi->sys_info.bootup_nb_voltage_index =
+			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
+		if (igp_info->info_8.ucHtcTmpLmt == 0)
+			pi->sys_info.htc_tmp_lmt = 203;
+		else
+			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
+		if (igp_info->info_8.ucHtcHystLmt == 0)
+			pi->sys_info.htc_hyst_lmt = 5;
+		else
+			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
+		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+		}
+
+		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
+			pi->sys_info.nb_dpm_enable = true;
+		else
+			pi->sys_info.nb_dpm_enable = false;
+
+		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
+			pi->sys_info.nbp_memory_clock[i] =
+				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
+			pi->sys_info.nbp_n_clock[i] =
+				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
+		}
+		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
+		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+			pi->caps_enable_dfs_bypass = true;
+
+		sumo_construct_sclk_voltage_mapping_table(rdev,
+							  &pi->sys_info.sclk_voltage_mapping_table,
+							  igp_info->info_8.sAvail_SCLK);
+
+		sumo_construct_vid_mapping_table(rdev,
+						 &pi->sys_info.vid_mapping_table,
+						 igp_info->info_8.sAvail_SCLK);
+
+		kv_construct_max_power_limits_table(rdev,
+						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+	}
+	return 0;
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void kv_patch_boot_state(struct radeon_device *rdev,
+				struct kv_ps *ps)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	ps->num_levels = 1;
+	ps->levels[0] = pi->boot_pl;
+}
+
+static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					  struct radeon_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	struct kv_ps *ps = kv_get_ps(rps);
+
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.dpm.boot_ps = rps;
+		kv_patch_boot_state(rdev, ps);
+	}
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
+				      struct radeon_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct kv_ps *ps = kv_get_ps(rps);
+	struct kv_pl *pl = &ps->levels[index];
+	u32 sclk;
+
+	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+	pl->sclk = sclk;
+	pl->vddc_index = clock_info->sumo.vddcIndex;
+
+	ps->num_levels = index + 1;
+
+	if (pi->caps_sclk_ds) {
+		pl->ds_divider_index = 5;
+		pl->ss_divider_index = 5;
+	}
+}
+
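+/* Walk the ATOM PPLib state array and build the driver's power state
+ * list.  Each v2 state carries indices into the shared clock-info and
+ * non-clock-info arrays; per-state levels are capped at
+ * SUMO_MAX_HARDWARE_POWERLEVELS. */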
+static int kv_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct kv_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info)
+			return -EINVAL;
+		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(rdev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		rdev->pm.dpm.ps[i].ps_priv = ps;
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			kv_parse_pplib_clock_info(rdev,
+						  &rdev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
+		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = 0;
+	}
+
+	return 0;
+}
+
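+/* One-time dpm setup: allocate the private power info, then parse the
+ * platform caps, the extended power table, the integrated system info
+ * table and the power states from the BIOS, and pick the default
+ * feature caps. */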
+int kv_dpm_init(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi;
+	int ret, i;
+
+	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+		pi->at[i] = TRINITY_AT_DFLT;
+
+	pi->sram_end = SMC_RAM_END;
+
+	/* Enabling nb dpm on an asrock system prevents dpm from working */
+	if (rdev->pdev->subsystem_vendor == 0x1849)
+		pi->enable_nb_dpm = false;
+	else
+		pi->enable_nb_dpm = true;
+
+	pi->caps_power_containment = true;
+	pi->caps_cac = true;
+	pi->enable_didt = false;
+	if (pi->enable_didt) {
+		pi->caps_sq_ramping = true;
+		pi->caps_db_ramping = true;
+		pi->caps_td_ramping = true;
+		pi->caps_tcp_ramping = true;
+	}
+
+	pi->caps_sclk_ds = true;
+	pi->enable_auto_thermal_throttling = true;
+	pi->disable_nb_ps3_in_battery = false;
+	if (radeon_bapm == -1) {
+		/* only enable bapm on KB, ML by default */
+		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
+			pi->bapm_enable = true;
+		else
+			pi->bapm_enable = false;
+	} else if (radeon_bapm == 0) {
+		pi->bapm_enable = false;
+	} else {
+		pi->bapm_enable = true;
+	}
+	pi->voltage_drop_t = 0;
+	pi->caps_sclk_throttle_low_notification = false;
+	pi->caps_fps = false; /* true? */
+	pi->caps_uvd_pg = true;
+	pi->caps_uvd_dpm = true;
+	pi->caps_vce_pg = false; /* XXX true */
+	pi->caps_samu_pg = false;
+	pi->caps_acp_pg = false;
+	pi->caps_stable_p_state = false;
+
+	ret = kv_parse_sys_info_table(rdev);
+	if (ret)
+		return ret;
+
+	kv_patch_voltage_values(rdev);
+	kv_construct_boot_state(rdev);
+
+	ret = kv_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	pi->enable_dpm = true;
+
+	return 0;
+}
+
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 current_index =
+		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+		CURR_SCLK_INDEX_SHIFT;
+	u32 sclk, tmp;
+	u16 vddc;
+
+	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
+		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+		seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+		seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
+		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+			   current_index, sclk, vddc);
+	}
+}
+
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u32 current_index =
+		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+		CURR_SCLK_INDEX_SHIFT;
+	u32 sclk;
+
+	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+		return 0;
+	} else {
+		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+		return sclk;
+	}
+}
+
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *rps)
+{
+	int i;
+	struct kv_ps *ps = kv_get_ps(rps);
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->num_levels; i++) {
+		struct kv_pl *pl = &ps->levels[i];
+		printk("\t\tpower level %d    sclk: %u vddc: %u\n",
+		       i, pl->sclk,
+		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void kv_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	r600_free_extended_power_table(rdev);
+}
+
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+}
+
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->levels[0].sclk;
+	else
+		return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 0000000..8cef752
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __KV_DPM_H__
+#define __KV_DPM_H__
+
+#define SMU__NUM_SCLK_DPM_STATE  8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
+#include "smu7_fusion.h"
+#include "trinity_dpm.h"
+#include "ppsmc.h"
+
+#define KV_NUM_NBPSTATES   4
+
+enum kv_pt_config_reg_type {
+	KV_CONFIGREG_MMR = 0,
+	KV_CONFIGREG_SMC_IND,
+	KV_CONFIGREG_DIDT_IND,
+	KV_CONFIGREG_CACHE,
+	KV_CONFIGREG_MAX
+};
+
+struct kv_pt_config_reg {
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum kv_pt_config_reg_type type;
+};
+
+struct kv_lcac_config_values {
+	u32 block_id;
+	u32 signal_id;
+	u32 t;
+};
+
+struct kv_lcac_config_reg {
+	u32 cntl;
+	u32 block_mask;
+	u32 block_shift;
+	u32 signal_mask;
+	u32 signal_shift;
+	u32 t_mask;
+	u32 t_shift;
+	u32 enable_mask;
+	u32 enable_shift;
+};
+
+struct kv_pl {
+	u32 sclk;
+	u8 vddc_index;
+	u8 ds_divider_index;
+	u8 ss_divider_index;
+	u8 allow_gnb_slow;
+	u8 force_nbp_state;
+	u8 display_wm;
+	u8 vce_wm;
+};
+
+struct kv_ps {
+	struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+	u32 num_levels;
+	bool need_dfs_bypass;
+	u8 dpm0_pg_nb_ps_lo;
+	u8 dpm0_pg_nb_ps_hi;
+	u8 dpmx_nb_ps_lo;
+	u8 dpmx_nb_ps_hi;
+};
+
+struct kv_sys_info {
+	u32 bootup_uma_clk;
+	u32 bootup_sclk;
+	u32 dentist_vco_freq;
+	u32 nb_dpm_enable;
+	u32 nbp_memory_clock[KV_NUM_NBPSTATES];
+	u32 nbp_n_clock[KV_NUM_NBPSTATES];
+	u16 bootup_nb_voltage_index;
+	u8 htc_tmp_lmt;
+	u8 htc_hyst_lmt;
+	struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+	struct sumo_vid_mapping_table vid_mapping_table;
+	u32 uma_channel_number;
+};
+
+struct kv_power_info {
+	u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+	u32 voltage_drop_t;
+	struct kv_sys_info sys_info;
+	struct kv_pl boot_pl;
+	bool enable_nb_ps_policy;
+	bool disable_nb_ps3_in_battery;
+	bool video_start;
+	bool battery_state;
+	u32 lowest_valid;
+	u32 highest_valid;
+	u16 high_voltage_t;
+	bool cac_enabled;
+	bool bapm_enable;
+	/* smc offsets */
+	u32 sram_end;
+	u32 dpm_table_start;
+	u32 soft_regs_start;
+	/* dpm SMU tables */
+	u8 graphics_dpm_level_count;
+	u8 uvd_level_count;
+	u8 vce_level_count;
+	u8 acp_level_count;
+	u8 samu_level_count;
+	u16 fps_high_t;
+	SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
+	SMU7_Fusion_ACPILevel acpi_level;
+	SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
+	SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
+	SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
+	SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
+	u8 uvd_boot_level;
+	u8 vce_boot_level;
+	u8 acp_boot_level;
+	u8 samu_boot_level;
+	u8 uvd_interval;
+	u8 vce_interval;
+	u8 acp_interval;
+	u8 samu_interval;
+	u8 graphics_boot_level;
+	u8 graphics_interval;
+	u8 graphics_therm_throttle_enable;
+	u8 graphics_voltage_change_enable;
+	u8 graphics_clk_slow_enable;
+	u8 graphics_clk_slow_divider;
+	u8 fps_low_t;
+	u32 low_sclk_interrupt_t;
+	bool uvd_power_gated;
+	bool vce_power_gated;
+	bool acp_power_gated;
+	bool samu_power_gated;
+	bool nb_dpm_enabled;
+	/* flags */
+	bool enable_didt;
+	bool enable_dpm;
+	bool enable_auto_thermal_throttling;
+	bool enable_nb_dpm;
+	/* caps */
+	bool caps_cac;
+	bool caps_power_containment;
+	bool caps_sq_ramping;
+	bool caps_db_ramping;
+	bool caps_td_ramping;
+	bool caps_tcp_ramping;
+	bool caps_sclk_throttle_low_notification;
+	bool caps_fps;
+	bool caps_uvd_dpm;
+	bool caps_uvd_pg;
+	bool caps_vce_pg;
+	bool caps_samu_pg;
+	bool caps_acp_pg;
+	bool caps_stable_p_state;
+	bool caps_enable_dfs_bypass;
+	bool caps_sclk_ds;
+	struct radeon_ps current_rps;
+	struct kv_ps current_ps;
+	struct radeon_ps requested_rps;
+	struct kv_ps requested_ps;
+};
+
+/* kv_smc.c */
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+				      PPSMC_Msg msg, u32 parameter);
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			   u32 *value, u32 limit);
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 0000000..af60bd3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "kv_dpm.h"
+
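+/* Post a message id to the SMC and poll SMC_RESP_0 for the result.
+ * A response of 1 means success; 0xFF and 0xFE appear to be the SMC's
+ * failed and unknown-command results, both mapped to -EINVAL here. */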
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
+{
+	u32 i;
+	u32 tmp = 0;
+
+	WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
+			break;
+		udelay(1);
+	}
+	tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
+
+	if (tmp != 1) {
+		if (tmp == 0xFF)
+			return -EINVAL;
+		else if (tmp == 0xFE)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
+{
+	int ret;
+
+	ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
+
+	if (ret == 0)
+		*enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
+
+	return ret;
+}
+
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+				      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_MSG_ARG_0, parameter);
+
+	return kv_notify_message_to_smu(rdev, msg);
+}
+
+static int kv_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(SMC_IND_INDEX_0, smc_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+	return 0;
+}
+
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			   u32 *value, u32 limit)
+{
+	int ret;
+
+	ret = kv_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret)
+		return ret;
+
+	*value = RREG32(SMC_IND_DATA_0);
+	return 0;
+}
+
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
+	else
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
+}
+
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
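+/* Copy a byte stream into SMC SRAM.  The SMC address space is 32-bit
+ * big-endian, so unaligned head and tail bytes are merged with the
+ * existing dword via read-modify-write: e.g. two bytes written at byte
+ * offset 2 keep the old high half (mask 0xffff0000) and land in the low
+ * 16 bits.  The aligned middle is written a dword at a time. */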
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit)
+{
+	int ret;
+	u32 data, original_data, addr, extra_shift, t_byte, count, mask;
+
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+	t_byte = addr & 3;
+
+	/* RMW for the initial bytes */
+	if (t_byte != 0) {
+		addr -= t_byte;
+
+		ret = kv_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			return ret;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+
+		data = 0;
+		mask = 0;
+		count = 4;
+		while (count > 0) {
+			if (t_byte > 0) {
+				mask = (mask << 8) | 0xff;
+				t_byte--;
+			} else if (byte_count > 0) {
+				data = (data << 8) + *src++;
+				byte_count--;
+				mask <<= 8;
+			} else {
+				data <<= 8;
+				mask = (mask << 8) | 0xff;
+			}
+			count--;
+		}
+
+		data |= original_data & mask;
+
+		ret = kv_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			return ret;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		addr += 4;
+	}
+
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		ret = kv_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			return ret;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW for the final bytes */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = kv_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			return ret;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* SMC address space is BE */
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = kv_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			return ret;
+
+		WREG32(SMC_IND_DATA_0, data);
+	}
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
new file mode 100644
index 0000000..ba70463
--- /dev/null
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Utility to create the register check tables.
+ * This includes an inlined list.h that is safe for userspace.
+ *
+ * Copyright 2009 Jerome Glisse
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Authors:
+ * 	Jerome Glisse
+ * 	Dave Airlie
+ */
+
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <regex.h>
+#include <libgen.h>
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:    the pointer to the member.
+ * @type:   the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({          \
+	const typeof(((type *)0)->member)*__mptr = (ptr);    \
+		     (type *)((char *)__mptr - offsetof(type, member)); })
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+	list->next = list;
+	list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add(struct list_head *new,
+			      struct list_head *prev, struct list_head *next)
+{
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+#else
+extern void __list_add(struct list_head *new,
+		       struct list_head *prev, struct list_head *next);
+#endif
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr:	the &struct list_head pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_head within the struct.
+ */
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
+/**
+ * list_for_each_entry	-	iterate over list of given type
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_head within the struct.
+ */
+#define list_for_each_entry(pos, head, member)				\
+	for (pos = list_entry((head)->next, typeof(*pos), member);	\
+	     &pos->member != (head); 	\
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+struct offset {
+	struct list_head list;
+	unsigned offset;
+};
+
+struct table {
+	struct list_head offsets;
+	unsigned offset_max;
+	unsigned nentry;
+	unsigned *table;
+	char *gpu_prefix;
+};
+
+static struct offset *offset_new(unsigned o)
+{
+	struct offset *offset;
+
+	offset = (struct offset *)malloc(sizeof(struct offset));
+	if (offset) {
+		INIT_LIST_HEAD(&offset->list);
+		offset->offset = o;
+	}
+	return offset;
+}
+
+static void table_offset_add(struct table *t, struct offset *offset)
+{
+	list_add_tail(&offset->list, &t->offsets);
+}
+
+static void table_init(struct table *t)
+{
+	INIT_LIST_HEAD(&t->offsets);
+	t->offset_max = 0;
+	t->nentry = 0;
+	t->table = NULL;
+}
+
+static void table_print(struct table *t)
+{
+	unsigned nlloop, i, j, n, c, id;
+
+	nlloop = (t->nentry + 3) / 4;
+	c = t->nentry;
+	printf("static const unsigned %s_reg_safe_bm[%d] = {\n", t->gpu_prefix,
+	       t->nentry);
+	for (i = 0, id = 0; i < nlloop; i++) {
+		n = 4;
+		if (n > c)
+			n = c;
+		c -= n;
+		for (j = 0; j < n; j++) {
+			if (j == 0)
+				printf("\t");
+			else
+				printf(" ");
+			printf("0x%08X,", t->table[id++]);
+		}
+		printf("\n");
+	}
+	printf("};\n");
+}
+
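+/* Build the register bitmap: one bit per dword-aligned offset, all bits
+ * initially set via the 0xff memset, with every offset listed in the
+ * reg_srcs file cleared (XORed off).  A set bit therefore marks a
+ * register the command-stream checker treats as unsafe. */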
+static int table_build(struct table *t)
+{
+	struct offset *offset;
+	unsigned i, m;
+
+	t->nentry = ((t->offset_max >> 2) + 31) / 32;
+	t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
+	if (t->table == NULL)
+		return -1;
+	memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
+	list_for_each_entry(offset, &t->offsets, list) {
+		i = (offset->offset >> 2) / 32;
+		m = (offset->offset >> 2) & 31;
+		m = 1 << m;
+		t->table[i] ^= m;
+	}
+	return 0;
+}
+
+static char gpu_name[10];
+static int parser_auth(struct table *t, const char *filename)
+{
+	FILE *file;
+	regex_t mask_rex;
+	regmatch_t match[4];
+	char buf[1024];
+	size_t end;
+	int len;
+	int done = 0;
+	int r;
+	unsigned o;
+	struct offset *offset;
+	char last_reg_s[10];
+	int last_reg;
+
+	if (regcomp
+	    (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+		fprintf(stderr, "Failed to compile regular expression\n");
+		return -1;
+	}
+	file = fopen(filename, "r");
+	if (file == NULL) {
+		fprintf(stderr, "Failed to open: %s\n", filename);
+		return -1;
+	}
+	fseek(file, 0, SEEK_END);
+	end = ftell(file);
+	fseek(file, 0, SEEK_SET);
+
+	/* get header */
+	if (fgets(buf, 1024, file) == NULL) {
+		fclose(file);
+		return -1;
+	}
+
+	/* first line will contain the last register
+	 * and gpu name */
+	sscanf(buf, "%9s %9s", gpu_name, last_reg_s);
+	t->gpu_prefix = gpu_name;
+	last_reg = strtol(last_reg_s, NULL, 16);
+
+	do {
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
+			return -1;
+		}
+		len = strlen(buf);
+		if (ftell(file) == end)
+			done = 1;
+		if (len) {
+			r = regexec(&mask_rex, buf, 4, match, 0);
+			if (r == REG_NOMATCH) {
+			} else if (r) {
+				fprintf(stderr,
+					"Error matching regular expression %d in %s\n",
+					r, filename);
+				fclose(file);
+				return -1;
+			} else {
+				buf[match[0].rm_eo] = 0;
+				buf[match[1].rm_eo] = 0;
+				buf[match[2].rm_eo] = 0;
+				o = strtol(&buf[match[1].rm_so], NULL, 16);
+				offset = offset_new(o);
+				table_offset_add(t, offset);
+				if (o > t->offset_max)
+					t->offset_max = o;
+			}
+		}
+	} while (!done);
+	fclose(file);
+	if (t->offset_max < last_reg)
+		t->offset_max = last_reg;
+	return table_build(t);
+}
+
+int main(int argc, char *argv[])
+{
+	struct table t;
+
+	if (argc != 2) {
+		fprintf(stderr, "Usage: %s <authfile>\n", argv[0]);
+		exit(1);
+	}
+	table_init(&t);
+	if (parser_auth(&t, argv[1])) {
+		fprintf(stderr, "Failed to parse file %s\n", argv[1]);
+		return -1;
+	}
+	table_print(&t);
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 0000000..381b025
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,2748 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include <drm/radeon_drm.h>
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+#include "radeon_ucode.h"
+#include "clearstate_cayman.h"
+
+/*
+ * Indirect registers accessor
+ */
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+	return r;
+}
+
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+}
+
+static const u32 tn_rlc_save_restore_register_list[] =
+{
+	0x98fc,
+	0x98f0,
+	0x9834,
+	0x9838,
+	0x9870,
+	0x9874,
+	0x8a14,
+	0x8b24,
+	0x8bcc,
+	0x8b10,
+	0x8c30,
+	0x8d00,
+	0x8d04,
+	0x8c00,
+	0x8c04,
+	0x8c10,
+	0x8c14,
+	0x8d8c,
+	0x8cf0,
+	0x8e38,
+	0x9508,
+	0x9688,
+	0x9608,
+	0x960c,
+	0x9610,
+	0x9614,
+	0x88c4,
+	0x8978,
+	0x88d4,
+	0x900c,
+	0x9100,
+	0x913c,
+	0x90e8,
+	0x9354,
+	0xa008,
+	0x98f8,
+	0x9148,
+	0x914c,
+	0x3f94,
+	0x98f4,
+	0x9b7c,
+	0x3f8c,
+	0x8950,
+	0x8954,
+	0x8a18,
+	0x8b28,
+	0x9144,
+	0x3f90,
+	0x915c,
+	0x9160,
+	0x9178,
+	0x917c,
+	0x9180,
+	0x918c,
+	0x9190,
+	0x9194,
+	0x9198,
+	0x919c,
+	0x91a8,
+	0x91ac,
+	0x91b0,
+	0x91b4,
+	0x91b8,
+	0x91c4,
+	0x91c8,
+	0x91cc,
+	0x91d0,
+	0x91d4,
+	0x91e0,
+	0x91e4,
+	0x91ec,
+	0x91f0,
+	0x91f4,
+	0x9200,
+	0x9204,
+	0x929c,
+	0x8030,
+	0x9150,
+	0x9a60,
+	0x920c,
+	0x9210,
+	0x9228,
+	0x922c,
+	0x9244,
+	0x9248,
+	0x91e8,
+	0x9294,
+	0x9208,
+	0x9224,
+	0x9240,
+	0x9220,
+	0x923c,
+	0x9258,
+	0x9744,
+	0xa200,
+	0xa204,
+	0xa208,
+	0xa20c,
+	0x8d58,
+	0x9030,
+	0x9034,
+	0x9038,
+	0x903c,
+	0x9040,
+	0x9654,
+	0x897c,
+	0xa210,
+	0xa214,
+	0x9868,
+	0xa02c,
+	0x9664,
+	0x9698,
+	0x949c,
+	0x8e10,
+	0x8e18,
+	0x8c50,
+	0x8c58,
+	0x8c60,
+	0x8c68,
+	0x89b4,
+	0x9830,
+	0x802c,
+};
+
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void evergreen_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BARTS_smc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/TURKS_smc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
+MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
+MODULE_FIRMWARE("radeon/ARUBA_me.bin");
+MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+
+static const u32 cayman_golden_registers2[] =
+{
+	0x3e5c, 0xffffffff, 0x00000000,
+	0x3e48, 0xffffffff, 0x00000000,
+	0x3e4c, 0xffffffff, 0x00000000,
+	0x3e64, 0xffffffff, 0x00000000,
+	0x3e50, 0xffffffff, 0x00000000,
+	0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x70073777, 0x00011003,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x02011003,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02011003,
+	0x98fc, 0xffffffff, 0x76541032,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x42010001,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000010f, 0x01000100,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88d0, 0xffffffff, 0x0f40df40,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+	0x8f8, 0xffffffff, 0,
+	0x8fc, 0x00380000, 0,
+	0x8f8, 0xffffffff, 1,
+	0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+	0x690, 0x3fff3fff, 0x20c00033,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0x00000fff, 0x00000001,
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0xfffffffe, 0x00000000,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x12010001,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x00030000, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xa008, 0xffffffff, 0x00010000,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0xf700071f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x20ef8, 0x01ff01ff, 0x00000002,
+	0x20e98, 0xfffffbff, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+	0x690, 0x3fff3fff, 0x20c00033,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0x00000fff, 0x00000001,
+	0x929c, 0x00000fff, 0x00000001,
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0xfffffffe, 0x00000000,
+	0x9838, 0xfffffffe, 0x00000000,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x12010001,
+	0xd0b8, 0x73773777, 0x12010001,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x00030000, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xa008, 0xffffffff, 0x00010000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x90e8, 0x001fffff, 0x010400c0,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c30, 0x0000000f, 0x00040005,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x900c, 0x00ffffff, 0x0017071f,
+	0x28350, 0x00000f01, 0x00000000,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0xf700071f, 0x00000002,
+	0x9508, 0xf700071f, 0x00000002,
+	0x9688, 0x00300000, 0x0017000f,
+	0x960c, 0xffffffff, 0x54763210,
+	0x960c, 0xffffffff, 0x54763210,
+	0x20ef8, 0x01ff01ff, 0x00000002,
+	0x20e98, 0xfffffbff, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_CAYMAN:
+		radeon_program_register_sequence(rdev,
+						 cayman_golden_registers,
+						 (const u32)ARRAY_SIZE(cayman_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 cayman_golden_registers2,
+						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
+		break;
+	case CHIP_ARUBA:
+		if ((rdev->pdev->device == 0x9900) ||
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9903) ||
+		    (rdev->pdev->device == 0x9904) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x990A) ||
+		    (rdev->pdev->device == 0x990B) ||
+		    (rdev->pdev->device == 0x990C) ||
+		    (rdev->pdev->device == 0x990D) ||
+		    (rdev->pdev->device == 0x990E) ||
+		    (rdev->pdev->device == 0x990F) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9913) ||
+		    (rdev->pdev->device == 0x9917) ||
+		    (rdev->pdev->device == 0x9918)) {
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers));
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers2,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
+		} else {
+			radeon_program_register_sequence(rdev,
+							 scrapper_golden_registers,
+							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers2,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+#define BTC_IO_MC_REGS_SIZE 29
+
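+/* Each entry below is an { MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA }
+ * pair that ni_mc_load_microcode() streams into the memory controller
+ * before uploading the MC ucode itself.
+ */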
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00916a00}
+};
+
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 mem_type, running, blackout = 0;
+	u32 *io_mc_regs;
+	int i, ucode_size, regs_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		io_mc_regs = (u32 *)&barts_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_TURKS:
+		io_mc_regs = (u32 *)&turks_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAICOS:
+	default:
+		io_mc_regs = (u32 *)&caicos_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAYMAN:
+		io_mc_regs = (u32 *)&cayman_io_mc_regs;
+		ucode_size = CAYMAN_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	}
+
+	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
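+		/* running was just tested to be 0, so the blackout
+		 * save below can never actually trigger here */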
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
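+		/* each table row is an (index, data) pair; the flat u32
+		 * view set up above reads them as even/odd slots */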
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+				break;
+			udelay(1);
+		}
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	size_t smc_req_size = 0;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		chip_name = "BARTS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_TURKS:
+		chip_name = "TURKS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_CAICOS:
+		chip_name = "CAICOS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_CAYMAN:
+		chip_name = "CAYMAN";
+		rlc_chip_name = "CAYMAN";
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_ARUBA:
+		chip_name = "ARUBA";
+		rlc_chip_name = "ARUBA";
+		/* pfp/me same size as CAYMAN */
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
+		mc_req_size = 0;
+		break;
+	default: BUG();
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
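+	/* firmware names are derived from the chip names above, e.g.
+	 * CHIP_BARTS requests radeon/BARTS_pfp.bin, radeon/BARTS_me.bin,
+	 * radeon/BTC_rlc.bin, radeon/BARTS_mc.bin and radeon/BARTS_smc.bin */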
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		pr_err("ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* no MC ucode on TN */
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->mc_fw->size != mc_req_size) {
+			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->size, fw_name);
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+		if (err) {
+			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
+			release_firmware(rdev->smc_fw);
+			rdev->smc_fw = NULL;
+			err = 0;
+		} else if (rdev->smc_fw->size != smc_req_size) {
+			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	}
+
+out:
+	if (err) {
+		if (err != -EINVAL)
+			pr_err("ni_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+	}
+	return err;
+}
+
+/**
+ * cayman_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+				     u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
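+/* Read the current GNB temperature on TN (Aruba-based IGPs) and convert
+ * it to millidegrees Celsius. Worked example: a raw field of 496 gives
+ * 496 / 8 - 49 = 13 degrees C, returned as 13000.
+ */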
+int tn_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
+	int actual_temp = (temp / 8) - 49;
+
+	return actual_temp * 1000;
+}
+
+/*
+ * Core functions
+ */
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 cgts_tcc_disable;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 cgts_sm_ctrl_reg;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	u32 disabled_rb_mask;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_CAYMAN:
+		rdev->config.cayman.max_shader_engines = 2;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 8;
+		rdev->config.cayman.max_simds_per_se = 12;
+		rdev->config.cayman.max_backends_per_se = 4;
+		rdev->config.cayman.max_texture_channel_caches = 8;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x100;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_ARUBA:
+	default:
+		rdev->config.cayman.max_shader_engines = 1;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 2;
+		if ((rdev->pdev->device == 0x9900) ||
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x990B) ||
+		    (rdev->pdev->device == 0x990C) ||
+		    (rdev->pdev->device == 0x990F) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9917) ||
+		    (rdev->pdev->device == 0x9999) ||
+		    (rdev->pdev->device == 0x999C)) {
+			rdev->config.cayman.max_simds_per_se = 6;
+			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
+		} else if ((rdev->pdev->device == 0x9903) ||
+			   (rdev->pdev->device == 0x9904) ||
+			   (rdev->pdev->device == 0x990A) ||
+			   (rdev->pdev->device == 0x990D) ||
+			   (rdev->pdev->device == 0x990E) ||
+			   (rdev->pdev->device == 0x9913) ||
+			   (rdev->pdev->device == 0x9918) ||
+			   (rdev->pdev->device == 0x999D)) {
+			rdev->config.cayman.max_simds_per_se = 4;
+			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
+		} else if ((rdev->pdev->device == 0x9919) ||
+			   (rdev->pdev->device == 0x9990) ||
+			   (rdev->pdev->device == 0x9991) ||
+			   (rdev->pdev->device == 0x9994) ||
+			   (rdev->pdev->device == 0x9995) ||
+			   (rdev->pdev->device == 0x9996) ||
+			   (rdev->pdev->device == 0x999A) ||
+			   (rdev->pdev->device == 0x99A0)) {
+			rdev->config.cayman.max_simds_per_se = 3;
+			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
+		} else {
+			rdev->config.cayman.max_simds_per_se = 2;
+			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
+		}
+		rdev->config.cayman.max_texture_channel_caches = 2;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x40;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.cayman.mem_row_size_in_kb > 4)
+		rdev->config.cayman.mem_row_size_in_kb = 4;
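+	/* e.g. a NOOFCOLS field of 2 gives (4 * 2^(8 + 2)) / 1024 = 4 KB,
+	 * which is exactly the cap applied above */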
+	/* XXX use MC settings? */
+	rdev->config.cayman.shader_engine_tile_size = 32;
+	rdev->config.cayman.num_gpus = 1;
+	rdev->config.cayman.multi_gpu_tile_size = 64;
+
+	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+	rdev->config.cayman.num_tile_pipes = (1 << tmp);
+	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+	rdev->config.cayman.num_shader_engines = tmp + 1;
+	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+	rdev->config.cayman.num_gpus = tmp + 1;
+	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.cayman.tile_config = 0;
+	switch (rdev->config.cayman.num_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.cayman.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.cayman.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.cayman.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.cayman.tile_config |= (3 << 0);
+		break;
+	}
+
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.cayman.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.cayman.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.cayman.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.cayman.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
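+	/* For illustration: 8 pipes (field 3), 8 banks (field 1), a
+	 * 256-byte pipe interleave (raw field 0) and 1 KB rows (raw
+	 * field 0) would pack into tile_config = 0x13.
+	 */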
+
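+	/* collect the per-SE render backend disable bits: each SE adds one
+	 * 4-bit nibble, SE0 ending up in the least significant nibble */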
+	tmp = 0;
+	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+		u32 rb_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+		tmp <<= 4;
+		tmp |= rb_disable_bitmap;
+	}
+	/* enabled rbs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+	tmp = 0;
+	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
+
+	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.cayman.active_simds = hweight32(~tmp);
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	if (ASIC_IS_DCE6(rdev))
+		WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+	if ((rdev->config.cayman.max_backends_per_se == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 2) {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp,
+						rdev->config.cayman.max_backends_per_se *
+						rdev->config.cayman.max_shader_engines,
+						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	rdev->config.cayman.backend_map = tmp;
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	cgts_tcc_disable = 0xffff0000;
+	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+		cgts_tcc_disable &= ~(1 << (16 + i));
+	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+	/* reprogram the shader complex */
+	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+	for (i = 0; i < 16; i++)
+		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+	/* need to be explicitly zero-ed */
+	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+	WREG32(SQ_LSTMP_RING_BASE, 0);
+	WREG32(SQ_HSTMP_RING_BASE, 0);
+	WREG32(SQ_ESTMP_RING_BASE, 0);
+	WREG32(SQ_GSTMP_RING_BASE, 0);
+	WREG32(SQ_VSTMP_RING_BASE, 0);
+	WREG32(SQ_PSTMP_RING_BASE, 0);
+
+	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+	WREG32(SQ_CONFIG, (VC_ENABLE |
+			   EXPORT_SRC_C |
+			   GFX_PRIO(0) |
+			   CS1_PRIO(0) |
+			   CS2_PRIO(1)));
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+
+	/* set clockgating golden values on TN */
+	if (rdev->family == CHIP_ARUBA) {
+		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
+		tmp &= ~0x00380000;
+		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
+		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
+		tmp &= ~0x0e000000;
+		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
+	}
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int i, r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       BANK_SELECT(6) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-7 */
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 8; i++) {
+		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
+			rdev->vm_manager.max_pfn - 1);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+		       rdev->vm_manager.saved_table_addr[i]);
+	}
+
+	/* enable context1-7 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	cayman_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 1; i < 8; ++i) {
+		rdev->vm_manager.saved_table_addr[i] = RREG32(
+			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
+	}
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+			      int ring, u32 cp_int_cntl)
+{
+	WREG32(SRBM_GFX_CNTL, RINGID(ring));
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
+/*
+ * CP.
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
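+	/* DATA_SEL(1) requests a 32-bit data write (the fence seq below);
+	 * INT_SEL(2) raises the interrupt once that write has completed. */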
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	if (ring->rptr_save_reg) {
+		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
+}
+
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	}
+}
+
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+			struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else {
+		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+			rptr = RREG32(CP_RB0_RPTR);
+		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+			rptr = RREG32(CP_RB1_RPTR);
+		else
+			rptr = RREG32(CP_RB2_RPTR);
+	}
+
+	return rptr;
+}
+
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+			struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+		wptr = RREG32(CP_RB0_WPTR);
+	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+		wptr = RREG32(CP_RB1_WPTR);
+	else
+		wptr = RREG32(CP_RB2_WPTR);
+
+	return wptr;
+}
+
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+		WREG32(CP_RB0_WPTR, ring->wptr);
+		(void)RREG32(CP_RB0_WPTR);
+	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
+		WREG32(CP_RB1_WPTR, ring->wptr);
+		(void)RREG32(CP_RB1_WPTR);
+	} else {
+		WREG32(CP_RB2_WPTR, ring->wptr);
+		(void)RREG32(CP_RB2_WPTR);
+	}
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	cayman_cp_enable(rdev, false);
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	cayman_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < cayman_default_size; i++)
+		radeon_ring_write(ring, cayman_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	/* XXX init other rings */
+
+	return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	cayman_cp_enable(rdev, false);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int cayman_cp_resume(struct radeon_device *rdev)
+{
+	static const int ridx[] = {
+		RADEON_RING_TYPE_GFX_INDEX,
+		CAYMAN_RING_TYPE_CP1_INDEX,
+		CAYMAN_RING_TYPE_CP2_INDEX
+	};
+	static const unsigned cp_rb_cntl[] = {
+		CP_RB0_CNTL,
+		CP_RB1_CNTL,
+		CP_RB2_CNTL,
+	};
+	static const unsigned cp_rb_rptr_addr[] = {
+		CP_RB0_RPTR_ADDR,
+		CP_RB1_RPTR_ADDR,
+		CP_RB2_RPTR_ADDR
+	};
+	static const unsigned cp_rb_rptr_addr_hi[] = {
+		CP_RB0_RPTR_ADDR_HI,
+		CP_RB1_RPTR_ADDR_HI,
+		CP_RB2_RPTR_ADDR_HI
+	};
+	static const unsigned cp_rb_base[] = {
+		CP_RB0_BASE,
+		CP_RB1_BASE,
+		CP_RB2_BASE
+	};
+	static const unsigned cp_rb_rptr[] = {
+		CP_RB0_RPTR,
+		CP_RB1_RPTR,
+		CP_RB2_RPTR
+	};
+	static const unsigned cp_rb_wptr[] = {
+		CP_RB0_WPTR,
+		CP_RB1_WPTR,
+		CP_RB2_WPTR
+	};
+	struct radeon_ring *ring;
+	int i, r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, (1 << 27));
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+	WREG32(SCRATCH_UMSK, 0xff);
+
+	for (i = 0; i < 3; ++i) {
+		uint32_t rb_cntl;
+		uint64_t addr;
+
+		/* Set ring buffer size */
+		ring = &rdev->ring[ridx[i]];
+		rb_cntl = order_base_2(ring->ring_size / 8);
+		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
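+		/* worked example: the 1 MB GFX ring set up in cayman_init()
+		 * gives a buffer size field of order_base_2(1048576 / 8) = 17,
+		 * and with the 4 KB RADEON_GPU_PAGE_SIZE an rptr report block
+		 * of order_base_2(4096 / 8) = 9, shifted up by 8 bits.
+		 */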
+#ifdef __BIG_ENDIAN
+		rb_cntl |= BUF_SWAP_32BIT;
+#endif
+		WREG32(cp_rb_cntl[i], rb_cntl);
+
+		/* set the wb address whether it's enabled or not */
+		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+	}
+
+	/* set the rb base addr, this causes an internal reset of ALL rings */
+	for (i = 0; i < 3; ++i) {
+		ring = &rdev->ring[ridx[i]];
+		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+	}
+
+	for (i = 0; i < 3; ++i) {
+		/* Initialize the ring buffer's read and write pointers */
+		ring = &rdev->ring[ridx[i]];
+		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
+
+		ring->wptr = 0;
+		WREG32(cp_rb_rptr[i], 0);
+		WREG32(cp_rb_wptr[i], ring->wptr);
+
+		mdelay(1);
+		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+	}
+
+	/* start the rings */
+	cayman_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	/* this only tests cp0 */
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG 0 */
+	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* DMA_STATUS_REG 1 */
+	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	if (tmp & DMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14F8));
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14D8));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14FC));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14DC));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	}
+
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+			SOFT_RESET_DB |
+			SOFT_RESET_GDS |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SH |
+			SOFT_RESET_SX |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VGT |
+			SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_DMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
+int cayman_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	u32 reset_mask;
+
+	if (hard) {
+		evergreen_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	cayman_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		evergreen_gpu_pci_config_reset(rdev);
+
+	r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+static void cayman_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early, so nothing happens there.
+		 * Going through that code would be pointless, hence we
+		 * disable uvd here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cayman_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cayman_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void cayman_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Only set for CHIP_ARUBA */
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * cayman_vce_start() fail early, so nothing happens there.
+		 * Going through that code would be pointless, hence we
+		 * disable vce here.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cayman_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cayman_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+	/* enable aspm */
+	evergreen_program_aspm(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	evergreen_mc_program(rdev);
+
+	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = cayman_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	cayman_gpu_init(rdev);
+
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size =
+			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
+		rdev->rlc.cs_data = cayman_cs_data;
+		r = sumo_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	cayman_uvd_start(rdev);
+	cayman_vce_start(rdev);
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = cayman_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = cayman_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	cayman_uvd_resume(rdev);
+	cayman_vce_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting: unlike on r500 hw, on rv770
+	 * and newer hw posting will perform the tasks necessary to bring
+	 * the GPU back into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	ni_init_golden_registers(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		DRM_ERROR("cayman startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+	return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	cayman_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	cayman_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does pretty much
+ * nothing more than call asic-specific functions. This should
+ * also allow removing a bunch of callback functions like
+ * vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	ni_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	cayman_uvd_init(rdev);
+	cayman_vce_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cayman_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 *
+	 * We can skip this check for TN, because there is no MC
+	 * ucode.
+	 */
+	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	if (rdev->has_vce)
+		radeon_vce_fini(rdev);
+	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 8;
+	/* base offset of vram pages */
+	if (rdev->flags & RADEON_IS_IGP) {
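+		/* FUS_MC_VM_FB_OFFSET appears to hold the base in 4MB
+		 * granularity, hence the shift by 22 below
+		 */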
+		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		rdev->vm_manager.vram_base_offset = tmp;
+	} else
+		rdev->vm_manager.vram_base_offset = 0;
+	return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * cayman_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human readable fault information (cayman/TN).
+ */
+void cayman_vm_decode_fault(struct radeon_device *rdev,
+			    u32 status, u32 addr)
+{
+	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+	char *block;
+
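+	/* each block is reported under several client ids (presumably
+	 * one per hardware instance), hence the grouped cases
+	 */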
+	switch (mc_id) {
+	case 32:
+	case 16:
+	case 96:
+	case 80:
+	case 160:
+	case 144:
+	case 224:
+	case 208:
+		block = "CB";
+		break;
+	case 33:
+	case 17:
+	case 97:
+	case 81:
+	case 161:
+	case 145:
+	case 225:
+	case 209:
+		block = "CB_FMASK";
+		break;
+	case 34:
+	case 18:
+	case 98:
+	case 82:
+	case 162:
+	case 146:
+	case 226:
+	case 210:
+		block = "CB_CMASK";
+		break;
+	case 35:
+	case 19:
+	case 99:
+	case 83:
+	case 163:
+	case 147:
+	case 227:
+	case 211:
+		block = "CB_IMMED";
+		break;
+	case 36:
+	case 20:
+	case 100:
+	case 84:
+	case 164:
+	case 148:
+	case 228:
+	case 212:
+		block = "DB";
+		break;
+	case 37:
+	case 21:
+	case 101:
+	case 85:
+	case 165:
+	case 149:
+	case 229:
+	case 213:
+		block = "DB_HTILE";
+		break;
+	case 38:
+	case 22:
+	case 102:
+	case 86:
+	case 166:
+	case 150:
+	case 230:
+	case 214:
+		block = "SX";
+		break;
+	case 39:
+	case 23:
+	case 103:
+	case 87:
+	case 167:
+	case 151:
+	case 231:
+	case 215:
+		block = "DB_STEN";
+		break;
+	case 40:
+	case 24:
+	case 104:
+	case 88:
+	case 232:
+	case 216:
+	case 168:
+	case 152:
+		block = "TC_TFETCH";
+		break;
+	case 41:
+	case 25:
+	case 105:
+	case 89:
+	case 233:
+	case 217:
+	case 169:
+	case 153:
+		block = "TC_VFETCH";
+		break;
+	case 42:
+	case 26:
+	case 106:
+	case 90:
+	case 234:
+	case 218:
+	case 170:
+	case 154:
+		block = "VC";
+		break;
+	case 112:
+		block = "CP";
+		break;
+	case 113:
+	case 114:
+		block = "SH";
+		break;
+	case 115:
+		block = "VGT";
+		break;
+	case 178:
+		block = "IH";
+		break;
+	case 51:
+		block = "RLC";
+		break;
+	case 55:
+		block = "DMA";
+		break;
+	case 56:
+		block = "HDP";
+		break;
+	default:
+		block = "unknown";
+		break;
+	}
+
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	       protections, vmid, addr,
+	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+	       block, mc_id);
+}
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @vm_id: VM context id to flush
+ * @pd_addr: address of the page directory to set
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		     unsigned vm_id, uint64_t pd_addr)
+{
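+	/* the per-VM page table base registers are laid out one dword
+	 * apart, hence the (vm_id << 2) offset
+	 */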
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	struct atom_clock_dividers dividers;
+	int r, i;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
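+	/* evclk is currently unused here; only the ECLK divider is
+	 * programmed. Wait up to ~1s (100 x 10ms) for ECLK to settle
+	 * both before and after changing the divider.
+	 */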
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 0000000..ce787a9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_trace.h"
+#include "nid.h"
+
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+
+/**
+ * cayman_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring)
+{
+	u32 rptr, reg;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+			reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
+		else
+			reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
+
+		rptr = RREG32(reg);
+	}
+
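+	/* the raw value is a byte offset; mask to the ring range and
+	 * convert to a dword index
+	 */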
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+	else
+		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+	return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (cayman+).
+ */
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+	else
+		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
+
+	if (rdev->wb.enabled) {
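+		/* precompute the ring position just past the upcoming
+		 * INDIRECT_BUFFER packet: the padding below forces it to
+		 * start at (8n + 5) so its 3 dwords end on an 8-dword
+		 * boundary
+		 */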
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl;
+
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	/* dma0 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+	/* dma1 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	u32 reg_offset, wb_offset;
+	int i, r;
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0) {
+			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+			reg_offset = DMA0_REGISTER_OFFSET;
+			wb_offset = R600_WB_DMA_RPTR_OFFSET;
+		} else {
+			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+			reg_offset = DMA1_REGISTER_OFFSET;
+			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+		}
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + reg_offset, 0);
+		WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		if (rdev->wb.enabled)
+			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+		dma_cntl = RREG32(DMA_CNTL + reg_offset);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = radeon_ring_test(rdev, ring->idx, ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+	}
+
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+	cayman_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+	u32 mask;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		mask = RADEON_RESET_DMA;
+	else
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count)
+{
+	unsigned ndw;
+
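+	/* each PTE is two dwords; the packet's dword count presumably
+	 * tops out below 0xFFFFF, so clamp and emit multiple packets
+	 */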
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += ndw * 4;
+		src += ndw * 4;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs to a multiple of 8 dwords (cayman/TN).
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
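+	/* top up with NOP packets until the IB length is a multiple of 8 dwords */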
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+			 unsigned vm_id, uint64_t pd_addr)
+{
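+	/* the DMA engine programs registers via SRBM_WRITE packets; the
+	 * 0xf in bits 19:16 is presumably a byte-enable mask
+	 */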
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0); /* value */
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
new file mode 100644
index 0000000..0fd8d6b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -0,0 +1,4380 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+#include "r600_dpm.h"
+#include "ni_dpm.h"
+#include "atom.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END 0xC000
+
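+/* per-variant CAC weight tables; the values initialize the fields of
+ * struct ni_cac_weights positionally (see ni_dpm.h)
+ */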
+static const struct ni_cac_weights cac_weights_cayman_xt =
+{
+	0x15,
+	0x2,
+	0x19,
+	0x2,
+	0x8,
+	0x14,
+	0x2,
+	0x16,
+	0xE,
+	0x17,
+	0x13,
+	0x2B,
+	0x10,
+	0x7,
+	0x5,
+	0x5,
+	0x5,
+	0x2,
+	0x3,
+	0x9,
+	0x10,
+	0x10,
+	0x2B,
+	0xA,
+	0x9,
+	0x4,
+	0xD,
+	0xD,
+	0x3E,
+	0x18,
+	0x14,
+	0,
+	0x3,
+	0x3,
+	0x5,
+	0,
+	0x2,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0x1CC,
+	0,
+	0x164,
+	1,
+	1,
+	1,
+	1,
+	12,
+	12,
+	12,
+	0x12,
+	0x1F,
+	132,
+	5,
+	7,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0 },
+	true
+};
+
+static const struct ni_cac_weights cac_weights_cayman_pro =
+{
+	0x16,
+	0x4,
+	0x10,
+	0x2,
+	0xA,
+	0x16,
+	0x2,
+	0x18,
+	0x10,
+	0x1A,
+	0x16,
+	0x2D,
+	0x12,
+	0xA,
+	0x6,
+	0x6,
+	0x6,
+	0x2,
+	0x4,
+	0xB,
+	0x11,
+	0x11,
+	0x2D,
+	0xC,
+	0xC,
+	0x7,
+	0x10,
+	0x10,
+	0x3F,
+	0x1A,
+	0x16,
+	0,
+	0x7,
+	0x4,
+	0x6,
+	1,
+	0x2,
+	0x1,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0x30,
+	0,
+	0x1CF,
+	0,
+	0x166,
+	1,
+	1,
+	1,
+	1,
+	12,
+	12,
+	12,
+	0x15,
+	0x1F,
+	132,
+	6,
+	6,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0 },
+	true
+};
+
+static const struct ni_cac_weights cac_weights_cayman_le =
+{
+	0x7,
+	0xE,
+	0x1,
+	0xA,
+	0x1,
+	0x3F,
+	0x2,
+	0x18,
+	0x10,
+	0x1A,
+	0x1,
+	0x3F,
+	0x1,
+	0xE,
+	0x6,
+	0x6,
+	0x6,
+	0x2,
+	0x4,
+	0x9,
+	0x1A,
+	0x1A,
+	0x2C,
+	0xA,
+	0x11,
+	0x8,
+	0x19,
+	0x19,
+	0x1,
+	0x1,
+	0x1A,
+	0,
+	0x8,
+	0x5,
+	0x8,
+	0x1,
+	0x3,
+	0x1,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0x38,
+	0x38,
+	0x239,
+	0x3,
+	0x18A,
+	1,
+	1,
+	1,
+	1,
+	12,
+	12,
+	12,
+	0x15,
+	0x22,
+	132,
+	6,
+	6,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0 },
+	true
+};
+
+#define NISLANDS_MGCG_SEQUENCE  300
+
+static const u32 cayman_cgcg_cgls_default[] =
+{
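+	/* Register,   Value,     Mask bits */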
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32))
+
+static const u32 cayman_cgcg_cgls_disable[] =
+{
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00000644, 0x000f7902, 0x001f4180,
+	0x00000644, 0x000f3802, 0x001f4180
+};
+#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32))
+
+static const u32 cayman_cgcg_cgls_enable[] =
+{
+	0x00000644, 0x000f7882, 0x001f4080,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000020, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000021, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000022, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000023, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000024, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000025, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000026, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000027, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000028, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000029, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002a, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x0000002b, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff
+};
+#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH  sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32))
+
+static const u32 cayman_mgcg_default[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00003fc4, 0xc0000000, 0xffffffff,
+	0x00005448, 0x00000100, 0xffffffff,
+	0x000055e4, 0x00000100, 0xffffffff,
+	0x0000160c, 0x00000100, 0xffffffff,
+	0x00008984, 0x06000100, 0xffffffff,
+	0x0000c164, 0x00000100, 0xffffffff,
+	0x00008a18, 0x00000100, 0xffffffff,
+	0x0000897c, 0x06000100, 0xffffffff,
+	0x00008b28, 0x00000100, 0xffffffff,
+	0x00009144, 0x00800200, 0xffffffff,
+	0x00009a60, 0x00000100, 0xffffffff,
+	0x00009868, 0x00000100, 0xffffffff,
+	0x00008d58, 0x00000100, 0xffffffff,
+	0x00009510, 0x00000100, 0xffffffff,
+	0x0000949c, 0x00000100, 0xffffffff,
+	0x00009654, 0x00000100, 0xffffffff,
+	0x00009030, 0x00000100, 0xffffffff,
+	0x00009034, 0x00000100, 0xffffffff,
+	0x00009038, 0x00000100, 0xffffffff,
+	0x0000903c, 0x00000100, 0xffffffff,
+	0x00009040, 0x00000100, 0xffffffff,
+	0x0000a200, 0x00000100, 0xffffffff,
+	0x0000a204, 0x00000100, 0xffffffff,
+	0x0000a208, 0x00000100, 0xffffffff,
+	0x0000a20c, 0x00000100, 0xffffffff,
+	0x00009744, 0x00000100, 0xffffffff,
+	0x00003f80, 0x00000100, 0xffffffff,
+	0x0000a210, 0x00000100, 0xffffffff,
+	0x0000a214, 0x00000100, 0xffffffff,
+	0x000004d8, 0x00000100, 0xffffffff,
+	0x00009664, 0x00000100, 0xffffffff,
+	0x00009698, 0x00000100, 0xffffffff,
+	0x000004d4, 0x00000200, 0xffffffff,
+	0x000004d0, 0x00000000, 0xffffffff,
+	0x000030cc, 0x00000104, 0xffffffff,
+	0x0000d0c0, 0x00000100, 0xffffffff,
+	0x0000d8c0, 0x00000100, 0xffffffff,
+	0x0000802c, 0x40000000, 0xffffffff,
+	0x00003fc4, 0x40000000, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009224, 0x00070000, 0xffffffff,
+	0x00009228, 0x00030002, 0xffffffff,
+	0x0000922c, 0x00050004, 0xffffffff,
+	0x00009238, 0x00010006, 0xffffffff,
+	0x0000923c, 0x00090008, 0xffffffff,
+	0x00009240, 0x00070000, 0xffffffff,
+	0x00009244, 0x00030002, 0xffffffff,
+	0x00009248, 0x00050004, 0xffffffff,
+	0x00009254, 0x00010006, 0xffffffff,
+	0x00009258, 0x00090008, 0xffffffff,
+	0x0000925c, 0x00070000, 0xffffffff,
+	0x00009260, 0x00030002, 0xffffffff,
+	0x00009264, 0x00050004, 0xffffffff,
+	0x00009270, 0x00010006, 0xffffffff,
+	0x00009274, 0x00090008, 0xffffffff,
+	0x00009278, 0x00070000, 0xffffffff,
+	0x0000927c, 0x00030002, 0xffffffff,
+	0x00009280, 0x00050004, 0xffffffff,
+	0x0000928c, 0x00010006, 0xffffffff,
+	0x00009290, 0x00090008, 0xffffffff,
+	0x000092a8, 0x00070000, 0xffffffff,
+	0x000092ac, 0x00030002, 0xffffffff,
+	0x000092b0, 0x00050004, 0xffffffff,
+	0x000092bc, 0x00010006, 0xffffffff,
+	0x000092c0, 0x00090008, 0xffffffff,
+	0x000092c4, 0x00070000, 0xffffffff,
+	0x000092c8, 0x00030002, 0xffffffff,
+	0x000092cc, 0x00050004, 0xffffffff,
+	0x000092d8, 0x00010006, 0xffffffff,
+	0x000092dc, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x0000802c, 0x40010000, 0xffffffff,
+	0x00003fc4, 0x40010000, 0xffffffff,
+	0x0000915c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009224, 0x00070000, 0xffffffff,
+	0x00009228, 0x00030002, 0xffffffff,
+	0x0000922c, 0x00050004, 0xffffffff,
+	0x00009238, 0x00010006, 0xffffffff,
+	0x0000923c, 0x00090008, 0xffffffff,
+	0x00009240, 0x00070000, 0xffffffff,
+	0x00009244, 0x00030002, 0xffffffff,
+	0x00009248, 0x00050004, 0xffffffff,
+	0x00009254, 0x00010006, 0xffffffff,
+	0x00009258, 0x00090008, 0xffffffff,
+	0x0000925c, 0x00070000, 0xffffffff,
+	0x00009260, 0x00030002, 0xffffffff,
+	0x00009264, 0x00050004, 0xffffffff,
+	0x00009270, 0x00010006, 0xffffffff,
+	0x00009274, 0x00090008, 0xffffffff,
+	0x00009278, 0x00070000, 0xffffffff,
+	0x0000927c, 0x00030002, 0xffffffff,
+	0x00009280, 0x00050004, 0xffffffff,
+	0x0000928c, 0x00010006, 0xffffffff,
+	0x00009290, 0x00090008, 0xffffffff,
+	0x000092a8, 0x00070000, 0xffffffff,
+	0x000092ac, 0x00030002, 0xffffffff,
+	0x000092b0, 0x00050004, 0xffffffff,
+	0x000092bc, 0x00010006, 0xffffffff,
+	0x000092c0, 0x00090008, 0xffffffff,
+	0x000092c4, 0x00070000, 0xffffffff,
+	0x000092c8, 0x00030002, 0xffffffff,
+	0x000092cc, 0x00050004, 0xffffffff,
+	0x000092d8, 0x00010006, 0xffffffff,
+	0x000092dc, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff,
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00003fc4, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000010, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000011, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000012, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000013, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000014, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000015, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000016, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000017, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000018, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000019, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001a, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x0000001b, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff
+};
+#define CAYMAN_MGCG_DEFAULT_LENGTH sizeof(cayman_mgcg_default) / (3 * sizeof(u32))
+
+static const u32 cayman_mgcg_disable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xffffffff,
+	0x00009150, 0x00600000, 0xffffffff
+};
+#define CAYMAN_MGCG_DISABLE_LENGTH   sizeof(cayman_mgcg_disable) / (3 * sizeof(u32))
+
+static const u32 cayman_mgcg_enable[] =
+{
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x000008f8, 0x00000002, 0xffffffff,
+	0x000008fc, 0x00600000, 0xffffffff,
+	0x000008f8, 0x00000003, 0xffffffff,
+	0x000008fc, 0x00000000, 0xffffffff,
+	0x00009150, 0x96944200, 0xffffffff
+};
+
+#define CAYMAN_MGCG_ENABLE_LENGTH   sizeof(cayman_mgcg_enable) / (3 * sizeof(u32))
+
+#define NISLANDS_SYSLS_SEQUENCE  100
+
+static const u32 cayman_sysls_default[] =
+{
+	/* Register,   Value,     Mask bits */
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x0000d8bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x00002f50, 0x00000404, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00008dfc, 0x00000000, 0xffffffff
+};
+#define CAYMAN_SYSLS_DEFAULT_LENGTH sizeof(cayman_sysls_default) / (3 * sizeof(u32))
+
+static const u32 cayman_sysls_disable[] =
+{
+	/* Register,   Value,     Mask bits */
+	0x0000d0c0, 0x00000000, 0xffffffff,
+	0x0000d8c0, 0x00000000, 0xffffffff,
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x0000d8bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x00041401, 0xffffffff,
+	0x0000264c, 0x00040400, 0xffffffff,
+	0x00002648, 0x00040400, 0xffffffff,
+	0x00002650, 0x00040400, 0xffffffff,
+	0x000020b8, 0x00040400, 0xffffffff,
+	0x000020bc, 0x00040400, 0xffffffff,
+	0x000020c0, 0x00040c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680000, 0xffffffff,
+	0x00002f50, 0x00000404, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x000064ec, 0x00007ffd, 0xffffffff,
+	0x00000c7c, 0x0000ff00, 0xffffffff,
+	0x00008dfc, 0x0000007f, 0xffffffff
+};
+#define CAYMAN_SYSLS_DISABLE_LENGTH sizeof(cayman_sysls_disable) / (3 * sizeof(u32))
+
+static const u32 cayman_sysls_enable[] =
+{
+	/* Register,   Value,     Mask bits */
+	0x000055e8, 0x00000001, 0xffffffff,
+	0x0000d0bc, 0x00000100, 0xffffffff,
+	0x0000d8bc, 0x00000100, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x00002f50, 0x00000903, 0xffffffff,
+	0x000004c8, 0x00000000, 0xffffffff,
+	0x000064ec, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00008dfc, 0x00000000, 0xffffffff
+};
+#define CAYMAN_SYSLS_ENABLE_LENGTH sizeof(cayman_sysls_enable) / (3 * sizeof(u32))
+
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
+struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
+{
+	struct ni_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+struct ni_ps *ni_get_ps(struct radeon_ps *rps)
+{
+	struct ni_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+						     u16 v, s32 t,
+						     u32 ileakage,
+						     u32 *leakage)
+{
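+	/* leakage model evaluated in drm 32.32 fixed point:
+	 *   kt = (at/1000) * exp((bt/1000) * T)
+	 *   kv = (av/1000) * exp((bv/1000) * V)
+	 *   leakage = I_leakage * kt * kv * V, scaled by 1000 on output
+	 */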
+	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+	temperature = div64_s64(drm_int2fixp(t), 1000);
+
+	kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
+			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
+	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
+			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
+					     const struct ni_leakage_coeffients *coeff,
+					     u16 v,
+					     s32 t,
+					     u32 i_leakage,
+					     u32 *leakage)
+{
+	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+	/* we never hit the non-gddr5 limit so disable it */
+	u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
+
+	return vblank_time < switch_limit;
+}
+
+static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
+					struct radeon_ps *rps)
+{
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct radeon_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching;
+	u32 mclk;
+	u16 vddci;
+	int i;
+
+	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+	    ni_dpm_vblank_too_short(rdev))
+		disable_mclk_switching = true;
+	else
+		disable_mclk_switching = false;
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (!rdev->pm.dpm.ac_power) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+			if (ps->performance_levels[i].vddc > max_limits->vddc)
+				ps->performance_levels[i].vddc = max_limits->vddc;
+			if (ps->performance_levels[i].vddci > max_limits->vddci)
+				ps->performance_levels[i].vddci = max_limits->vddci;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	/* adjust low state */
+	if (disable_mclk_switching) {
+		ps->performance_levels[0].mclk =
+			ps->performance_levels[ps->performance_level_count - 1].mclk;
+		ps->performance_levels[0].vddci =
+			ps->performance_levels[ps->performance_level_count - 1].vddci;
+	}
+
+	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
+				  &ps->performance_levels[0].sclk,
+				  &ps->performance_levels[0].mclk);
+
+	for (i = 1; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+	}
+
+	/* adjust remaining states */
+	if (disable_mclk_switching) {
+		mclk = ps->performance_levels[0].mclk;
+		vddci = ps->performance_levels[0].vddci;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (mclk < ps->performance_levels[i].mclk)
+				mclk = ps->performance_levels[i].mclk;
+			if (vddci < ps->performance_levels[i].vddci)
+				vddci = ps->performance_levels[i].vddci;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].mclk = mclk;
+			ps->performance_levels[i].vddci = vddci;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+		}
+	}
+
+	for (i = 1; i < ps->performance_level_count; i++)
+		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
+					  &ps->performance_levels[i].sclk,
+					  &ps->performance_levels[i].mclk);
+
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(rdev, max_limits,
+					      &ps->performance_levels[i]);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+						   ps->performance_levels[i].sclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddci, &ps->performance_levels[i].vddci);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+						   rdev->clock.current_dispclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		btc_apply_voltage_delta_rules(rdev,
+					      max_limits->vddc, max_limits->vddci,
+					      &ps->performance_levels[i].vddc,
+					      &ps->performance_levels[i].vddci);
+	}
+
+	ps->dc_compatible = true;
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+			ps->dc_compatible = false;
+
+		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
+			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
+	}
+}
+
+static void ni_cg_clockgating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	ps = (const u32 *)&cayman_cgcg_cgls_default;
+	count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
+				      bool enable)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	if (enable) {
+		ps = (const u32 *)&cayman_cgcg_cgls_enable;
+		count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
+	} else {
+		ps = (const u32 *)&cayman_cgcg_cgls_disable;
+		count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static void ni_mg_clockgating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	ps = (const u32 *)&cayman_mgcg_default;
+	count = CAYMAN_MGCG_DEFAULT_LENGTH;
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static void ni_mg_clockgating_enable(struct radeon_device *rdev,
+				     bool enable)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	if (enable) {
+		ps = (const u32 *)&cayman_mgcg_enable;
+		count = CAYMAN_MGCG_ENABLE_LENGTH;
+	} else {
+		ps = (const u32 *)&cayman_mgcg_disable;
+		count = CAYMAN_MGCG_DISABLE_LENGTH;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static void ni_ls_clockgating_default(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	ps = (const u32 *)&cayman_sysls_default;
+	count = CAYMAN_SYSLS_DEFAULT_LENGTH;
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static void ni_ls_clockgating_enable(struct radeon_device *rdev,
+				     bool enable)
+{
+	u32 count;
+	const u32 *ps = NULL;
+
+	if (enable) {
+		ps = (const u32 *)&cayman_sysls_enable;
+		count = CAYMAN_SYSLS_ENABLE_LENGTH;
+	} else {
+		ps = (const u32 *)&cayman_sysls_disable;
+		count = CAYMAN_SYSLS_DISABLE_LENGTH;
+	}
+
+	btc_program_mgcg_hw_sequence(rdev, ps, count);
+}
+
+static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
+							     struct radeon_clock_voltage_dependency_table *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++) {
+			if (0xff01 == table->entries[i].v) {
+				if (pi->max_vddc == 0)
+					return -EINVAL;
+				table->entries[i].v = pi->max_vddc;
+			}
+		}
+	}
+	return 0;
+}
+
+static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
+								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	if (ret)
+		return ret;
+
+	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
+								&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	return ret;
+}
+
+static void ni_stop_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+#if 0
+static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
+					bool ac_power)
+{
+	if (ac_power)
+		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_SCRATCH0, parameter);
+	return rv770_send_msg_to_smc(rdev, msg);
+}
+
+static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+int ni_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level)
+{
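+	/* SetEnabledLevels appears to limit the SMC to the lowest n
+	 * levels (0 = no limit) and SetForcedLevels pins the selection:
+	 * high pins the top level, low limits to one level, auto clears
+	 * both restrictions
+	 */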
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+static void ni_stop_smc(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
+		if (tmp != 1)
+			break;
+		udelay(1);
+	}
+
+	udelay(100);
+
+	r7xx_stop_smc(rdev);
+}
+
+static int ni_process_firmware_header(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
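+	/* the SMC firmware header is a table of dword offsets; read each
+	 * entry from SRAM and cache the start of the tables we program
+	 */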
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	pi->state_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	pi->soft_regs_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	eg_pi->mc_reg_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	ni_pi->fan_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	ni_pi->arb_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	ni_pi->cac_table_start = (u16)tmp;
+
+	ret = rv770_read_smc_sram_dword(rdev,
+					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+					&tmp, pi->sram_end);
+
+	if (ret)
+		return ret;
+
+	ni_pi->spll_table_start = (u16)tmp;
+
+	return ret;
+}
+
+static void ni_read_clock_registers(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+
+	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
+	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
+	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+#if 0
+static int ni_enter_ulp_state(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (pi->gfx_clock_gating) {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_ADDR_CONFIG);
+	}
+
+	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
+		 ~HOST_SMC_MSG_MASK);
+
+	udelay(25000);
+
+	return 0;
+}
+#endif
+
+static void ni_program_response_times(struct radeon_device *rdev)
+{
+	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
+	u32 reference_clock;
+
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
+	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+
+	if (voltage_response_time == 0)
+		voltage_response_time = 1000;
+
+	if (backbias_response_time == 0)
+		backbias_response_time = 1000;
+
+	acpi_delay_time = 15000;
+	vbi_time_out = 100000;
+
+	reference_clock = radeon_get_xclk(rdev);
+
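+	/* convert the timeouts into reference-clock-based delay values for the SMC */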
+	vddc_dly = (voltage_response_time  * reference_clock) / 1600;
+	bb_dly   = (backbias_response_time * reference_clock) / 1600;
+	acpi_dly = (acpi_delay_time * reference_clock) / 1600;
+	vbi_dly  = (vbi_time_out * reference_clock) / 1600;
+
+	mclk_switch_limit = (460 * reference_clock) / 100;
+
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
+}
+
+static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
+					  struct atom_voltage_table *voltage_table,
+					  NISLANDS_SMC_STATETABLE *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < voltage_table->count; i++) {
+		table->highSMIO[i] = 0;
+		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+	}
+}
+
+static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
+					   NISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	unsigned char i;
+
+	if (eg_pi->vddc_voltage_table.count) {
+		ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
+		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
+			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+				table->maxVDDCIndexInPPTable = i;
+				break;
+			}
+		}
+	}
+
+	if (eg_pi->vddci_voltage_table.count) {
+		ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+
+		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
+		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+	}
+}
+
+static int ni_populate_voltage_value(struct radeon_device *rdev,
+				     struct atom_voltage_table *table,
+				     u16 value,
+				     NISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value <= table->entries[i].value) {
+			voltage->index = (u8)i;
+			voltage->value = cpu_to_be16(table->entries[i].value);
+			break;
+		}
+	}
+
+	if (i >= table->count)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ni_populate_mvdd_value(struct radeon_device *rdev,
+				   u32 mclk,
+				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (!pi->mvdd_control) {
+		voltage->index = eg_pi->mvdd_high_index;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+		return;
+	}
+
+	if (mclk <= pi->mvdd_split_frequency) {
+		voltage->index = eg_pi->mvdd_low_index;
+		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
+	} else {
+		voltage->index = eg_pi->mvdd_high_index;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+	}
+}
+
+static int ni_get_std_voltage_value(struct radeon_device *rdev,
+				    NISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage)
+{
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
+	    ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
+		*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+	else
+		*std_voltage = be16_to_cpu(voltage->value);
+
+	return 0;
+}
+
+static void ni_populate_std_voltage_value(struct radeon_device *rdev,
+					  u16 value, u8 index,
+					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	voltage->index = index;
+	voltage->value = cpu_to_be16(value);
+}
+
+static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
+{
+	u32 xclk_period;
+	u32 xclk = radeon_get_xclk(rdev);
+	u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;
+
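+	/* xclk is in 10 kHz units, so this yields the clock period in ns,
+	 * scaled by the CAC TID count
+	 */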
+	xclk_period = (1000000000UL / xclk);
+	xclk_period /= 10000UL;
+
+	return tmp * xclk_period;
+}
+
+static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
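+	/* apply the CAC scaling factor and shift into the SMC's fixed-point power units */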
+	return (power_in_watts * scaling_factor) << 2;
+}
+
+static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
+					  struct radeon_ps *radeon_state,
+					  u32 near_tdp_limit)
+{
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 power_boost_limit = 0;
+	int ret;
+
+	if (ni_pi->enable_power_containment &&
+	    ni_pi->use_power_boost_limit) {
+		NISLANDS_SMC_VOLTAGE_VALUE vddc;
+		u16 std_vddc_med;
+		u16 std_vddc_high;
+		u64 tmp, n, d;
+
+		if (state->performance_level_count < 3)
+			return 0;
+
+		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[state->performance_level_count - 2].vddc,
+						&vddc);
+		if (ret)
+			return 0;
+
+		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
+		if (ret)
+			return 0;
+
+		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[state->performance_level_count - 1].vddc,
+						&vddc);
+		if (ret)
+			return 0;
+
+		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
+		if (ret)
+			return 0;
+
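+		/* boost limit = near TDP limit scaled by (Vmed / Vhigh)^2, less a 10% margin */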
+		n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
+		d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
+		tmp = div64_u64(n, d);
+
+		if (tmp >> 32)
+			return 0;
+		power_boost_limit = (u32)tmp;
+	}
+
+	return power_boost_limit;
+}
+
+static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
+					    bool adjust_polarity,
+					    u32 tdp_adjustment,
+					    u32 *tdp_limit,
+					    u32 *near_tdp_limit)
+{
+	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
+		return -EINVAL;
+
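+	/* adjust_polarity selects whether the TDP override raises or lowers the limits */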
+	if (adjust_polarity) {
+		*tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
+		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
+	} else {
+		*tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
+		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
+	}
+
+	return 0;
+}
+
+static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
+				      struct radeon_ps *radeon_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+
+	if (ni_pi->enable_power_containment) {
+		NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
+		u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
+		u32 tdp_limit;
+		u32 near_tdp_limit;
+		u32 power_boost_limit;
+		int ret;
+
+		if (scaling_factor == 0)
+			return -EINVAL;
+
+		memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));
+
+		ret = ni_calculate_adjusted_tdp_limits(rdev,
+						       false, /* ??? */
+						       rdev->pm.dpm.tdp_adjustment,
+						       &tdp_limit,
+						       &near_tdp_limit);
+		if (ret)
+			return ret;
+
+		power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
+								   near_tdp_limit);
+
+		smc_table->dpm2Params.TDPLimit =
+			cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
+							   scaling_factor));
+		smc_table->dpm2Params.PowerBoostLimit =
+			cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));
+
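+		/* TDPLimit through PowerBoostLimit are four consecutive __be32
+		 * fields, so copy all four in one transfer.
+		 */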
+		ret = rv770_copy_bytes_to_smc(rdev,
+					      (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
+						    offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
+					      (u8 *)(&smc_table->dpm2Params.TDPLimit),
+					      sizeof(u32) * 4, pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+				u32 arb_freq_src, u32 arb_freq_dest)
+{
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 burst_time;
+	u32 mc_cg_config;
+
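+	/* copy the DRAM timing/burst parameters from the source arb set to the
+	 * destination set, then ask the MC to switch to the destination set.
+	 */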
+	switch (arb_freq_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_freq_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+	WREG32(MC_CG_CONFIG, mc_cg_config);
+	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+	return 0;
+}
+
+static int ni_init_arb_table_index(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
+					&tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
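+	/* the high byte of the first arb-table dword records the current MC arb set; start at F1 */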
+	tmp &= 0x00FFFFFF;
+	tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24;
+
+	return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
+					  tmp, pi->sram_end);
+}
+
+static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
+					&tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp = (tmp >> 24) & 0xff;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
+						struct rv7xx_pl *pl,
+						SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+
+	arb_regs->mc_arb_rfsh_rate =
+		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
+
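+	/* let the VBIOS program timings for this sclk/mclk, then capture the result */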
+	radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);
+
+	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+
+	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+
+	return 0;
+}
+
+static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
+						  struct radeon_ps *radeon_state,
+						  unsigned int first_arb_set)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int i, ret = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
+		if (ret)
+			break;
+
+		ret = rv770_copy_bytes_to_smc(rdev,
+					      (u16)(ni_pi->arb_table_start +
+						    offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
+						    sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
+					      (u8 *)&arb_regs,
+					      (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
+					      pi->sram_end);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
+					       struct radeon_ps *radeon_new_state)
+{
+	return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
+						      NISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
+					   struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	voltage->index = eg_pi->mvdd_high_index;
+	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+}
+
+static int ni_populate_smc_initial_state(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_initial_state,
+					 NISLANDS_SMC_STATETABLE *table)
+{
+	struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 reg;
+	int ret;
+
+	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
+		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
+	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
+		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
+	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(ni_pi->clock_registers.dll_cntl);
+	table->initialState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
+	table->initialState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
+	table->initialState.levels[0].mclk.mclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].sclk);
+	table->initialState.levels[0].arbRefreshState =
+		NISLANDS_INITIAL_STATE_ARB_INDEX;
+
+	table->initialState.levels[0].ACIndex = 0;
+
+	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+					initial_state->performance_levels[0].vddc,
+					&table->initialState.levels[0].vddc);
+	if (!ret) {
+		u16 std_vddc;
+
+		ret = ni_get_std_voltage_value(rdev,
+					       &table->initialState.levels[0].vddc,
+					       &std_vddc);
+		if (!ret)
+			ni_populate_std_voltage_value(rdev, std_vddc,
+						      table->initialState.levels[0].vddc.index,
+						      &table->initialState.levels[0].std_vddc);
+	}
+
+	if (eg_pi->vddci_control)
+		ni_populate_voltage_value(rdev,
+					  &eg_pi->vddci_voltage_table,
+					  initial_state->performance_levels[0].vddci,
+					  &table->initialState.levels[0].vddci);
+
+	ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+
+	reg = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(reg);
+
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+
+	if (pi->boot_in_gen2)
+		table->initialState.levels[0].gen2PCIE = 1;
+	else
+		table->initialState.levels[0].gen2PCIE = 0;
+
+	if (pi->mem_gddr5) {
+		table->initialState.levels[0].strobeMode =
+			cypress_get_strobe_mode_settings(rdev,
+							 initial_state->performance_levels[0].mclk);
+
+		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+			table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
+		else
+			table->initialState.levels[0].mcFlags = 0;
+	}
+
+	table->initialState.levelCount = 1;
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	table->initialState.levels[0].dpm2.MaxPS = 0;
+	table->initialState.levels[0].dpm2.NearTDPDec = 0;
+	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
+				      NISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 mpll_ad_func_cntl   = ni_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl   = ni_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
+	u32 spll_func_cntl      = ni_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2    = ni_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3    = ni_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4    = ni_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 mclk_pwrmgt_cntl    = ni_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 dll_cntl            = ni_pi->clock_registers.dll_cntl;
+	u32 reg;
+	int ret;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		ret = ni_populate_voltage_value(rdev,
+						&eg_pi->vddc_voltage_table,
+						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = ni_get_std_voltage_value(rdev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+			if (!ret)
+				ni_populate_std_voltage_value(rdev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+
+		if (pi->pcie_gen2) {
+			if (pi->acpi_pcie_gen2)
+				table->ACPIState.levels[0].gen2PCIE = 1;
+			else
+				table->ACPIState.levels[0].gen2PCIE = 0;
+		} else {
+			table->ACPIState.levels[0].gen2PCIE = 0;
+		}
+	} else {
+		ret = ni_populate_voltage_value(rdev,
+						&eg_pi->vddc_voltage_table,
+						pi->min_vddc_in_table,
+						&table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = ni_get_std_voltage_value(rdev,
+						       &table->ACPIState.levels[0].vddc,
+						       &std_vddc);
+			if (!ret)
+				ni_populate_std_voltage_value(rdev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = 0;
+	}
+
+	if (eg_pi->acpi_vddci) {
+		if (eg_pi->vddci_control)
+			ni_populate_voltage_value(rdev,
+						  &eg_pi->vddci_voltage_table,
+						  eg_pi->acpi_vddci,
+						  &table->ACPIState.levels[0].vddci);
+	}
+
+	mpll_ad_func_cntl &= ~PDNB;
+
+	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
+
+	if (pi->mem_gddr5)
+		mpll_dq_func_cntl &= ~PDNB;
+	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
+
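+	/* reset, power down and bypass the memory DLLs for the ACPI state */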
+	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
+			     MRDCKA1_RESET |
+			     MRDCKB0_RESET |
+			     MRDCKB1_RESET |
+			     MRDCKC0_RESET |
+			     MRDCKC1_RESET |
+			     MRDCKD0_RESET |
+			     MRDCKD1_RESET);
+
+	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
+			      MRDCKA1_PDNB |
+			      MRDCKB0_PDNB |
+			      MRDCKB1_PDNB |
+			      MRDCKC0_PDNB |
+			      MRDCKC1_PDNB |
+			      MRDCKD0_PDNB |
+			      MRDCKD1_PDNB);
+
+	dll_cntl |= (MRDCKA0_BYPASS |
+		     MRDCKA1_BYPASS |
+		     MRDCKB0_BYPASS |
+		     MRDCKB1_BYPASS |
+		     MRDCKC0_BYPASS |
+		     MRDCKC1_BYPASS |
+		     MRDCKD0_BYPASS |
+		     MRDCKD1_BYPASS);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	table->ACPIState.levels[0].mclk.mclk_value = 0;
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
+
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	if (eg_pi->dynamic_ac_timing)
+		table->ACPIState.levels[0].ACIndex = 1;
+
+	table->ACPIState.levels[0].dpm2.MaxPS = 0;
+	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int ni_init_smc_table(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	int ret;
+	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+	NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;
+
+	memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));
+
+	ni_populate_smc_voltage_tables(rdev, table);
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	if (ret)
+		return ret;
+
+	ret = ni_populate_smc_acpi_state(rdev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	table->ULVState = table->initialState;
+
+	ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
+						     NISLANDS_INITIAL_STATE_ARB_INDEX);
+	if (ret)
+		return ret;
+
+	return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
+				       sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
+}
+
+static int ni_calculate_sclk_params(struct radeon_device *rdev,
+				    u32 engine_clock,
+				    NISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
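+	/* fbdiv is a fixed-point value with 14 fractional bits (scale 16384 = 2^14) */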
+	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = engine_clock;
+	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+	return 0;
+}
+
+static int ni_populate_sclk_value(struct radeon_device *rdev,
+				  u32 engine_clock,
+				  NISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	NISLANDS_SMC_SCLK_VALUE sclk_tmp;
+	int ret;
+
+	ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
+	if (!ret) {
+		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+	}
+
+	return ret;
+}
+
+static int ni_init_smc_spll_table(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
+	NISLANDS_SMC_SCLK_VALUE sclk_params;
+	u32 fb_div;
+	u32 p_div;
+	u32 clk_s;
+	u32 clk_v;
+	u32 sclk = 0;
+	int i, ret;
+	u32 tmp;
+
+	if (ni_pi->spll_table_start == 0)
+		return -EINVAL;
+
+	spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+	if (spll_table == NULL)
+		return -ENOMEM;
+
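+	/* build a 256-entry divider table in steps of 512 (sclk is in 10 kHz
+	 * units, so this covers engine clocks up to ~1.3 GHz)
+	 */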
+	for (i = 0; i < 256; i++) {
+		ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
+		if (ret)
+			break;
+
+		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+		fb_div &= ~0x00001FFF;
+		fb_div >>= 1;
+		clk_v >>= 6;
+
+		if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+			ret = -EINVAL;
+
+		if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+			ret = -EINVAL;
+
+		if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+			ret = -EINVAL;
+
+		if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+			ret = -EINVAL;
+
+		if (ret)
+			break;
+
+		tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+			((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+		spll_table->freq[i] = cpu_to_be32(tmp);
+
+		tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+			((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+		spll_table->ss[i] = cpu_to_be32(tmp);
+
+		sclk += 512;
+	}
+
+	if (!ret)
+		ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
+					      sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);
+
+	kfree(spll_table);
+
+	return ret;
+}
+
+static int ni_populate_mclk_value(struct radeon_device *rdev,
+				  u32 engine_clock,
+				  u32 memory_clock,
+				  NISLANDS_SMC_MCLK_VALUE *mclk,
+				  bool strobe_mode,
+				  bool dll_state_on)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
+	u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
+	u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
+	struct atom_clock_dividers dividers;
+	u32 ibias;
+	u32 dll_speed;
+	int ret;
+	u32 mc_seq_misc7;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     memory_clock, strobe_mode, &dividers);
+	if (ret)
+		return ret;
+
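+	/* in non-strobe mode, MC_SEQ_MISC7 bit 27 forces a post divider of 1 */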
+	if (!strobe_mode) {
+		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
+
+		if (mc_seq_misc7 & 0x8000000)
+			dividers.post_div = 1;
+	}
+
+	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
+
+	mpll_ad_func_cntl &= ~(CLKR_MASK |
+			       YCLK_POST_DIV_MASK |
+			       CLKF_MASK |
+			       CLKFRAC_MASK |
+			       IBIAS_MASK);
+	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
+	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
+	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+	mpll_ad_func_cntl |= IBIAS(ibias);
+
+	if (dividers.vco_mode)
+		mpll_ad_func_cntl_2 |= VCO_MODE;
+	else
+		mpll_ad_func_cntl_2 &= ~VCO_MODE;
+
+	if (pi->mem_gddr5) {
+		mpll_dq_func_cntl &= ~(CLKR_MASK |
+				       YCLK_POST_DIV_MASK |
+	mdelay(25);
+				       CLKFRAC_MASK |
+				       IBIAS_MASK);
+		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
+		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
+		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+		mpll_dq_func_cntl |= IBIAS(ibias);
+
+		if (strobe_mode)
+			mpll_dq_func_cntl &= ~PDNB;
+		else
+			mpll_dq_func_cntl |= PDNB;
+
+		if (dividers.vco_mode)
+			mpll_dq_func_cntl_2 |= VCO_MODE;
+		else
+			mpll_dq_func_cntl_2 &= ~VCO_MODE;
+	}
+
+	if (pi->mclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = memory_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+			u32 reference_clock = rdev->clock.mpll.reference_freq;
+			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+			u32 clk_v = ss.percentage *
+				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clk_v);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clk_s);
+		}
+	}
+
+	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
+					memory_clock);
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
+				     MRDCKA1_PDNB |
+				     MRDCKB0_PDNB |
+				     MRDCKB1_PDNB |
+				     MRDCKC0_PDNB |
+				     MRDCKC1_PDNB |
+				     MRDCKD0_PDNB |
+				     MRDCKD1_PDNB);
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
+				      MRDCKA1_PDNB |
+				      MRDCKB0_PDNB |
+				      MRDCKB1_PDNB |
+				      MRDCKC0_PDNB |
+				      MRDCKC1_PDNB |
+				      MRDCKD0_PDNB |
+				      MRDCKD1_PDNB);
+
+	mclk->mclk_value = cpu_to_be32(memory_clock);
+	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+static void ni_populate_smc_sp(struct radeon_device *rdev,
+			       struct radeon_ps *radeon_state,
+			       NISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_ps *ps = ni_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
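+	/* the highest level gets pi->psp as its bSP; all lower levels get pi->dsp */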
+	for (i = 0; i < ps->performance_level_count - 1; i++)
+		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+	smc_state->levels[ps->performance_level_count - 1].bSP =
+		cpu_to_be32(pi->psp);
+}
+
+static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
+					 struct rv7xx_pl *pl,
+					 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	int ret;
+	bool dll_state_on;
+	u16 std_vddc;
+	u32 tmp = RREG32(DC_STUTTER_CNTL);
+
+	level->gen2PCIE = pi->pcie_gen2 ?
+		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
+
+	ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
+	if (ret)
+		return ret;
+
+	level->mcFlags = 0;
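+	/* enable memory stutter mode only below the threshold, with UVD idle
+	 * and both display stutter enable bits set
+	 */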
+	if (pi->mclk_stutter_mode_threshold &&
+	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+	    !eg_pi->uvd_enabled &&
+	    (tmp & DC_STUTTER_ENABLE_A) &&
+	    (tmp & DC_STUTTER_ENABLE_B))
+		level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;
+
+	if (pi->mem_gddr5) {
+		if (pl->mclk > pi->mclk_edc_enable_threshold)
+			level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
+		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+			level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;
+
+		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
+
+		if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
+			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+			else
+				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+		} else {
+			dll_state_on = false;
+			if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
+				level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
+		}
+
+		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
+					     &level->mclk,
+					     (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
+					     dll_state_on);
+	} else {
+		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk,
+					     true, true);
+	}
+
+	if (ret)
+		return ret;
+
+	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+					pl->vddc, &level->vddc);
+	if (ret)
+		return ret;
+
+	ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
+	if (ret)
+		return ret;
+
+	ni_populate_std_voltage_value(rdev, std_vddc,
+				      level->vddc.index, &level->std_vddc);
+
+	if (eg_pi->vddci_control) {
+		ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
+						pl->vddci, &level->vddci);
+		if (ret)
+			return ret;
+	}
+
+	ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
+static int ni_populate_smc_t(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_state,
+			     NISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	u32 a_t;
+	u32 t_l, t_h;
+	u32 high_bsp;
+	int i, ret;
+
+	if (state->performance_level_count >= 9)
+		return -EINVAL;
+
+	if (state->performance_level_count < 2) {
+		a_t = CG_R(0xffff) | CG_L(0);
+		smc_state->levels[0].aT = cpu_to_be32(a_t);
+		return 0;
+	}
+
+	smc_state->levels[0].aT = cpu_to_be32(0);
+
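+	/* compute the aT up/down switch thresholds for each adjacent pair of levels */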
+	for (i = 0; i <= state->performance_level_count - 2; i++) {
+		if (eg_pi->uvd_enabled)
+			ret = r600_calculate_at(
+				1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
+				100 * R600_AH_DFLT,
+				state->performance_levels[i + 1].sclk,
+				state->performance_levels[i].sclk,
+				&t_l,
+				&t_h);
+		else
+			ret = r600_calculate_at(
+				1000 * (i + 1),
+				100 * R600_AH_DFLT,
+				state->performance_levels[i + 1].sclk,
+				state->performance_levels[i].sclk,
+				&t_l,
+				&t_h);
+
+		if (ret) {
+			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+		}
+
+		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+		a_t |= CG_R(t_l * pi->bsp / 20000);
+		smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+		high_bsp = (i == state->performance_level_count - 2) ?
+			pi->pbsp : pi->bsp;
+
+		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+	}
+
+	return 0;
+}
+
+static int ni_populate_power_containment_values(struct radeon_device *rdev,
+						struct radeon_ps *radeon_state,
+						NISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	u32 prev_sclk;
+	u32 max_sclk;
+	u32 min_sclk;
+	int i, ret;
+	u32 tdp_limit;
+	u32 near_tdp_limit;
+	u32 power_boost_limit;
+	u8 max_ps_percent;
+
+	if (ni_pi->enable_power_containment == false)
+		return 0;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	ret = ni_calculate_adjusted_tdp_limits(rdev,
+					       false, /* ??? */
+					       rdev->pm.dpm.tdp_adjustment,
+					       &tdp_limit,
+					       &near_tdp_limit);
+	if (ret)
+		return ret;
+
+	power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);
+
+	ret = rv770_write_smc_sram_dword(rdev,
+					 pi->state_table_start +
+					 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
+					 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
+					 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
+					 pi->sram_end);
+	if (ret)
+		power_boost_limit = 0;
+
+	smc_state->levels[0].dpm2.MaxPS = 0;
+	smc_state->levels[0].dpm2.NearTDPDec = 0;
+	smc_state->levels[0].dpm2.AboveSafeInc = 0;
+	smc_state->levels[0].dpm2.BelowSafeInc = 0;
+	smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;
+
+	for (i = 1; i < state->performance_level_count; i++) {
+		prev_sclk = state->performance_levels[i-1].sclk;
+		max_sclk  = state->performance_levels[i].sclk;
+		max_ps_percent = (i != (state->performance_level_count - 1)) ?
+			NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;
+
+		if (max_sclk < prev_sclk)
+			return -EINVAL;
+
+		if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
+			min_sclk = max_sclk;
+		else if (i == 1)
+			min_sclk = prev_sclk;
+		else
+			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+		if (min_sclk < state->performance_levels[0].sclk)
+			min_sclk = state->performance_levels[0].sclk;
+
+		if (min_sclk == 0)
+			return -EINVAL;
+
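+		/* MaxPS: the maximum pulse skip, scaled by the fraction of
+		 * sclk that may be shed at this level
+		 */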
+		smc_state->levels[i].dpm2.MaxPS =
+			(u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+		smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
+		smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
+		smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
+		smc_state->levels[i].stateFlags |=
+			((i != (state->performance_level_count - 1)) && power_boost_limit) ?
+			PPSMC_STATEFLAG_POWERBOOST : 0;
+	}
+
+	return 0;
+}
+
+static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_state,
+					 NISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	u32 sq_power_throttle;
+	u32 sq_power_throttle2;
+	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+	int i;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	if (rdev->pm.dpm.sq_ramping_threshold == 0)
+		return -EINVAL;
+
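+	/* disable SQ ramping if any default parameter exceeds its register field */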
+	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+		enable_sq_ramping = false;
+
+	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+		enable_sq_ramping = false;
+
+	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+		enable_sq_ramping = false;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		sq_power_throttle  = 0;
+		sq_power_throttle2 = 0;
+
+		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
+		    enable_sq_ramping) {
+			sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+			sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+			sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+			sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+			sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+		} else {
+			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+		}
+
+		smc_state->levels[i].SQPowerThrottle   = cpu_to_be32(sq_power_throttle);
+		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+	}
+
+	return 0;
+}
+
+static int ni_enable_power_containment(struct radeon_device *rdev,
+				       struct radeon_ps *radeon_new_state,
+				       bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_power_containment) {
+		if (enable) {
+			if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
+				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->pc_enabled = false;
+				} else {
+					ni_pi->pc_enabled = true;
+				}
+			}
+		} else {
+			smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
+			if (smc_result != PPSMC_Result_OK)
+				ret = -EINVAL;
+			ni_pi->pc_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_state,
+					 NISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	int i, ret;
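+	/* the watermark threshold is 100% of the highest level's sclk */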
+	u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100;
+
+	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	smc_state->levelCount = 0;
+
+	if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
+		return -EINVAL;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
+						    &smc_state->levels[i]);
+		smc_state->levels[i].arbRefreshState =
+			(u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+		if (ret)
+			return ret;
+
+		if (ni_pi->enable_power_containment)
+			smc_state->levels[i].displayWatermark =
+				(state->performance_levels[i].sclk < threshold) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+		else
+			smc_state->levels[i].displayWatermark = (i < 2) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+		if (eg_pi->dynamic_ac_timing)
+			smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+		else
+			smc_state->levels[i].ACIndex = 0;
+
+		smc_state->levelCount++;
+	}
+
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
+				      cpu_to_be32(threshold / 512));
+
+	ni_populate_smc_sp(rdev, radeon_state, smc_state);
+
+	ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
+	if (ret)
+		ni_pi->enable_sq_ramping = false;
+
+	return ni_populate_smc_t(rdev, radeon_state, smc_state);
+}
+
+static int ni_upload_sw_state(struct radeon_device *rdev,
+			      struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u16 address = pi->state_table_start +
+		offsetof(NISLANDS_SMC_STATETABLE, driverState);
+	u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
+		((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+	int ret;
+	NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);
+
+	if (smc_state == NULL)
+		return -ENOMEM;
+
+	ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
+	if (ret)
+		goto done;
+
+	ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);
+
+done:
+	kfree(smc_state);
+
+	return ret;
+}
+
+static int ni_set_mc_special_registers(struct radeon_device *rdev,
+				       struct ni_mc_reg_table *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 i, j, k;
+	u32 temp_reg;
+
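+	/* append derived register entries: EMRS/MRS from MC_SEQ_MISC1 and
+	 * MRS1 from MC_SEQ_RESERVE_M
+	 */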
+	for (i = 0, j = table->last; i < table->last; i++) {
+		switch (table->mc_reg_address[i].s1) {
+		case MC_SEQ_MISC1 >> 2:
+			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			temp_reg = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			j++;
+			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (!pi->mem_gddr5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		case MC_SEQ_RESERVE_M >> 2:
+			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			temp_reg = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			j++;
+			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
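+	/* map an MC register to its low-power (_LP) shadow copy */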
+	switch (in_reg) {
+	case  MC_SEQ_RAS_TIMING >> 2:
+		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_CAS_TIMING >> 2:
+		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING2 >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+		break;
+	case MC_PMG_CMD_EMRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS1 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+		break;
+	case MC_SEQ_PMG_TIMING >> 2:
+		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS2 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static void ni_set_valid_flag(struct ni_mc_reg_table *table)
+{
+	u8 i, j;
+
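+	/* flag only the registers whose value actually differs between entries */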
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++)
+		table->mc_reg_address[i].s0 =
+			ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+}
+
+static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+				      struct ni_mc_reg_table *ni_table)
+{
+	u8 i, j;
+
+	if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	ni_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ni_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++)
+			ni_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+	}
+	ni_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	int ret;
+	struct atom_mc_reg_table *table;
+	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(rdev);
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+
+	if (ret)
+		goto init_mc_done;
+
+	ret = ni_copy_vbios_mc_reg_table(table, ni_table);
+
+	if (ret)
+		goto init_mc_done;
+
+	ni_set_s0_mc_reg_index(ni_table);
+
+	ret = ni_set_mc_special_registers(rdev, ni_table);
+
+	if (ret)
+		goto init_mc_done;
+
+	ni_set_valid_flag(ni_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
+static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
+					 SMC_NIslands_MCRegisters *mc_reg_table)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) {
+		if (ni_pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+				break;
+			mc_reg_table->address[i].s0 =
+				cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+	mc_reg_table->last = (u8)i;
+}
+
+static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
+				    SMC_NIslands_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+						 struct rv7xx_pl *pl,
+						 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 i = 0;
+
+	for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
+		if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data,
+				ni_pi->mc_reg_table.last,
+				ni_pi->mc_reg_table.valid_flag);
+}
+
+static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+					   struct radeon_ps *radeon_state,
+					   SMC_NIslands_MCRegisters *mc_reg_table)
+{
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	int i;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ni_convert_mc_reg_table_entry_to_smc(rdev,
+						     &state->performance_levels[i],
+						     &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+	}
+}
+
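+/*
+ * Initial upload of the complete SMC MC register table.  Slot 0 is
+ * filled from the boot state and slot 1 from the lowest VBIOS entry;
+ * everything below NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT is
+ * reserved (presumably for the initial and ACPI states), and the
+ * driver states follow from slot 2 on.
+ */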
+static int ni_populate_mc_reg_table(struct radeon_device *rdev,
+				    struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
+	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
+
+	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
+
+	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);
+
+	ni_populate_mc_reg_addresses(rdev, mc_reg_table);
+
+	ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
+					     &mc_reg_table->data[0]);
+
+	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
+				&mc_reg_table->data[1],
+				ni_pi->mc_reg_table.last,
+				ni_pi->mc_reg_table.valid_flag);
+
+	ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);
+
+	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
+				       (u8 *)mc_reg_table,
+				       sizeof(SMC_NIslands_MCRegisters),
+				       pi->sram_end);
+}
+
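+/*
+ * On a state switch only the driver-state slots change, so re-convert
+ * the new state and copy just those slots to the SMC, at the offset
+ * of the first driver-state slot.
+ */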
+static int ni_upload_mc_reg_table(struct radeon_device *rdev,
+				  struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
+	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
+	u16 address;
+
+	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
+
+	ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);
+
+	address = eg_pi->mc_reg_table_start +
+		(u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+
+	return rv770_copy_bytes_to_smc(rdev, address,
+				       (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+				       sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
+				       pi->sram_end);
+}
+
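+/*
+ * Fill the temperature x voltage leakage lookup table from the
+ * driver-side model: row i is evaluated at (i + 1) * 8 degrees C
+ * (passed in units of 0.001 C, clamped to the configured minimum),
+ * column j at one VDDC table entry.  Unused voltage columns are
+ * padded with the largest leakage value seen.
+ */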
+static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
+						   PP_NIslands_CACTABLES *cac_tables)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 leakage = 0;
+	unsigned int i, j, table_size;
+	s32 t;
+	u32 smc_leakage, max_leakage = 0;
+	u32 scaling_factor;
+
+	table_size = eg_pi->vddc_voltage_table.count;
+
+	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
+		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+
+	scaling_factor = ni_get_smc_power_scaling_factor(rdev);
+
+	for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+		for (j = 0; j < table_size; j++) {
+			t = (1000 * ((i + 1) * 8));
+
+			if (t < ni_pi->cac_data.leakage_minimum_temperature)
+				t = ni_pi->cac_data.leakage_minimum_temperature;
+
+			ni_calculate_leakage_for_v_and_t(rdev,
+							 &ni_pi->cac_data.leakage_coefficients,
+							 eg_pi->vddc_voltage_table.entries[j].value,
+							 t,
+							 ni_pi->cac_data.i_leakage,
+							 &leakage);
+
+			smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
+			if (smc_leakage > max_leakage)
+				max_leakage = smc_leakage;
+
+			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
+		}
+	}
+
+	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+			cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
+	}
+	return 0;
+}
+
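+/*
+ * Simplified variant: the leakage values come pre-computed from the
+ * power-play table rather than from the model, so they carry no
+ * temperature dependence and each voltage column is simply replicated
+ * across all temperature rows.
+ */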
+static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
+					    PP_NIslands_CACTABLES *cac_tables)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_cac_leakage_table *leakage_table =
+		&rdev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i, j, table_size;
+	u32 smc_leakage, max_leakage = 0;
+	u32 scaling_factor;
+
+	if (!leakage_table)
+		return -EINVAL;
+
+	table_size = leakage_table->count;
+
+	if (eg_pi->vddc_voltage_table.count != table_size)
+		table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
+			eg_pi->vddc_voltage_table.count : leakage_table->count;
+
+	if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
+		table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+
+	if (table_size == 0)
+		return -EINVAL;
+
+	scaling_factor = ni_get_smc_power_scaling_factor(rdev);
+
+	for (j = 0; j < table_size; j++) {
+		smc_leakage = leakage_table->entries[j].leakage;
+
+		if (smc_leakage > max_leakage)
+			max_leakage = smc_leakage;
+
+		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+			cac_tables->cac_lkge_lut[i][j] =
+				cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
+	}
+
+	for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+		for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+			cac_tables->cac_lkge_lut[i][j] =
+				cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
+	}
+	return 0;
+}
+
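+/*
+ * Allocate and fill the CAC tables and push them into SMC RAM.
+ * Failures are swallowed here: CAC and power containment get switched
+ * off and 0 is returned, so ni_dpm_enable() carries on without them.
+ */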
+static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	PP_NIslands_CACTABLES *cac_tables = NULL;
+	int i, ret;
+	u32 reg;
+
+	if (!ni_pi->enable_cac)
+		return 0;
+
+	cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
+	if (!cac_tables)
+		return -ENOMEM;
+
+	reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
+	reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
+		TID_UNIT(ni_pi->cac_weights->tid_unit));
+	WREG32(CG_CAC_CTRL, reg);
+
+	for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
+		ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];
+
+	for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
+		cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];
+
+	ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
+	ni_pi->cac_data.pwr_const = 0;
+	ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
+	ni_pi->cac_data.bif_cac_value = 0;
+	ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
+	ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
+	ni_pi->cac_data.allow_ovrflw = 0;
+	ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
+	ni_pi->cac_data.num_win_tdp = 0;
+	ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;
+
+	if (ni_pi->driver_calculate_cac_leakage)
+		ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
+	else
+		ret = ni_init_simplified_leakage_table(rdev, cac_tables);
+
+	if (ret)
+		goto done_free;
+
+	cac_tables->pwr_const      = cpu_to_be32(ni_pi->cac_data.pwr_const);
+	cac_tables->dc_cacValue    = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
+	cac_tables->bif_cacValue   = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
+	cac_tables->AllowOvrflw    = ni_pi->cac_data.allow_ovrflw;
+	cac_tables->MCWrWeight     = ni_pi->cac_data.mc_wr_weight;
+	cac_tables->MCRdWeight     = ni_pi->cac_data.mc_rd_weight;
+	cac_tables->numWin_TDP     = ni_pi->cac_data.num_win_tdp;
+	cac_tables->l2numWin_TDP   = ni_pi->cac_data.l2num_win_tdp;
+	cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;
+
+	ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
+				      sizeof(PP_NIslands_CACTABLES), pi->sram_end);
+
+done_free:
+	if (ret) {
+		ni_pi->enable_cac = false;
+		ni_pi->enable_power_containment = false;
+	}
+
+	kfree(cac_tables);
+
+	return 0;
+}
+
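+/*
+ * Program the per-block CAC weights (TCP/TA/TCC/CB/DB/SX/SPI/LDS/...)
+ * into the CG_CAC_REGION_* registers through the CG indirect register
+ * space, plus the SQ thresholds and the MC read/write weights.  Only
+ * needed when the selected weight set requires explicit configuration.
+ */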
+static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	u32 reg;
+
+	if (!ni_pi->enable_cac ||
+	    !ni_pi->cac_configuration_required)
+		return 0;
+
+	if (ni_pi->cac_weights == NULL)
+		return -EINVAL;
+
+	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
+						      WEIGHT_TCP_SIG1_MASK |
+						      WEIGHT_TA_SIG_MASK);
+	reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
+		WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
+		WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
+	WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
+						      WEIGHT_TCC_EN1_MASK |
+						      WEIGHT_TCC_EN2_MASK);
+	reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
+		WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
+		WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
+	WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
+						      WEIGHT_CB_EN1_MASK |
+						      WEIGHT_CB_EN2_MASK |
+						      WEIGHT_CB_EN3_MASK);
+	reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
+		WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
+		WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
+		WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
+	WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
+						      WEIGHT_DB_SIG1_MASK |
+						      WEIGHT_DB_SIG2_MASK |
+						      WEIGHT_DB_SIG3_MASK);
+	reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
+		WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
+		WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
+		WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
+	WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
+						      WEIGHT_SXM_SIG1_MASK |
+						      WEIGHT_SXM_SIG2_MASK |
+						      WEIGHT_SXS_SIG0_MASK |
+						      WEIGHT_SXS_SIG1_MASK);
+	reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
+		WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
+		WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
+		WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
+		WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
+	WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
+						      WEIGHT_XBR_1_MASK |
+						      WEIGHT_XBR_2_MASK |
+						      WEIGHT_SPI_SIG0_MASK);
+	reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
+		WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
+		WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
+		WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
+	WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
+						      WEIGHT_SPI_SIG2_MASK |
+						      WEIGHT_SPI_SIG3_MASK |
+						      WEIGHT_SPI_SIG4_MASK |
+						      WEIGHT_SPI_SIG5_MASK);
+	reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
+		WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
+		WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
+		WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
+		WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
+	WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
+						      WEIGHT_LDS_SIG1_MASK |
+						      WEIGHT_SC_MASK);
+	reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
+		WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
+		WEIGHT_SC(ni_pi->cac_weights->weight_sc));
+	WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
+						      WEIGHT_CP_MASK |
+						      WEIGHT_PA_SIG0_MASK |
+						      WEIGHT_PA_SIG1_MASK |
+						      WEIGHT_VGT_SIG0_MASK);
+	reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
+		WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
+		WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
+		WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
+		WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
+	WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
+						      WEIGHT_VGT_SIG2_MASK |
+						      WEIGHT_DC_SIG0_MASK |
+						      WEIGHT_DC_SIG1_MASK |
+						      WEIGHT_DC_SIG2_MASK);
+	reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
+		WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
+		WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
+		WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
+		WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
+	WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
+						      WEIGHT_UVD_SIG0_MASK |
+						      WEIGHT_UVD_SIG1_MASK |
+						      WEIGHT_SPARE0_MASK |
+						      WEIGHT_SPARE1_MASK);
+	reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
+		WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
+		WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
+		WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
+		WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
+	WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
+						      WEIGHT_SQ_VSP0_MASK);
+	reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
+		WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
+	WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
+	reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
+	WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);
+
+	reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
+							OVR_VAL_SPARE_0_MASK |
+							OVR_MODE_SPARE_1_MASK |
+							OVR_VAL_SPARE_1_MASK);
+	reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
+		OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
+		OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
+		OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
+	WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);
+
+	reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
+					   VSP0_MASK |
+					   GPR_MASK);
+	reg |= (VSP(ni_pi->cac_weights->vsp) |
+		VSP0(ni_pi->cac_weights->vsp0) |
+		GPR(ni_pi->cac_weights->gpr));
+	WREG32(SQ_CAC_THRESHOLD, reg);
+
+	reg = (MCDW_WR_ENABLE |
+	       MCDX_WR_ENABLE |
+	       MCDY_WR_ENABLE |
+	       MCDZ_WR_ENABLE |
+	       INDEX(0x09D4));
+	WREG32(MC_CG_CONFIG, reg);
+
+	reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
+	       WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
+	       ALLOW_OVERFLOW);
+	WREG32(MC_CG_DATAPORT, reg);
+
+	return 0;
+}
+
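+/*
+ * Enable/disable CAC through SMC messages.  Enable is skipped for UVD
+ * states; if the SMC rejects the long-term-average message, that
+ * feature is dropped rather than treated as a hard error.
+ */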
+static int ni_enable_smc_cac(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_new_state,
+			     bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	int ret = 0;
+	PPSMC_Result smc_result;
+
+	if (ni_pi->enable_cac) {
+		if (enable) {
+			if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
+				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);
+
+				if (ni_pi->support_cac_long_term_average) {
+					smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
+					if (PPSMC_Result_OK != smc_result)
+						ni_pi->support_cac_long_term_average = false;
+				}
+
+				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+				if (PPSMC_Result_OK != smc_result)
+					ret = -EINVAL;
+
+				ni_pi->cac_enabled = (smc_result == PPSMC_Result_OK);
+			}
+		} else if (ni_pi->cac_enabled) {
+			smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+
+			ni_pi->cac_enabled = false;
+
+			if (ni_pi->support_cac_long_term_average) {
+				smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
+				if (PPSMC_Result_OK != smc_result)
+					ni_pi->support_cac_long_term_average = false;
+			}
+		}
+	}
+
+	return ret;
+}
+
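+/*
+ * Forward a PCIe performance request to the platform via ACPI,
+ * registering the device with the platform on first use (compiled
+ * out when CONFIG_ACPI is disabled).
+ */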
+static int ni_pcie_performance_request(struct radeon_device *rdev,
+				       u8 perf_req, bool advertise)
+{
+#if defined(CONFIG_ACPI)
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
+	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+		if (!eg_pi->pcie_performance_request_registered)
+			radeon_acpi_pcie_notify_device_ready(rdev);
+		eg_pi->pcie_performance_request_registered = true;
+		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
+	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
+		    eg_pi->pcie_performance_request_registered) {
+		eg_pi->pcie_performance_request_registered = false;
+		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
+	}
+#endif
+	return 0;
+}
+
+static int ni_advertise_gen2_capability(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+		pi->pcie_gen2 = true;
+	else
+		pi->pcie_gen2 = false;
+
+	if (!pi->pcie_gen2)
+		ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
+
+	return 0;
+}
+
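+/*
+ * Enable/disable hardware-driven gen2 link-speed switching in the
+ * BIF.  Only acts when the link partner has advertised gen2 support;
+ * on enable the failed-speed-change counter is pulse-cleared around
+ * the strap write.
+ */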
+static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
+					    bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp, bif;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+		if (enable) {
+			if (!pi->boot_in_gen2) {
+				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
+				bif |= CG_CLIENT_REQ(0xd);
+				WREG32(CG_BIF_REQ_AND_RSP, bif);
+			}
+			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+			tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
+			tmp |= LC_GEN2_EN_STRAP;
+
+			tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+			udelay(10);
+			tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+		} else {
+			if (!pi->boot_in_gen2) {
+				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
+				bif |= CG_CLIENT_REQ(0xd);
+				WREG32(CG_BIF_REQ_AND_RSP, bif);
+
+				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+				tmp &= ~LC_GEN2_EN_STRAP;
+			}
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+		}
+	}
+}
+
+static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
+					bool enable)
+{
+	ni_enable_bif_dynamic_pcie_gen2(rdev, enable);
+
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+}
+
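+/*
+ * Reprogram the UVD clocks around an engine-clock switch: before the
+ * switch when the top sclk is dropping, after it when it is rising,
+ * and not at all when vclk/dclk are unchanged.
+ */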
+void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+					   struct radeon_ps *new_ps,
+					   struct radeon_ps *old_ps)
+{
+	struct ni_ps *new_state = ni_get_ps(new_ps);
+	struct ni_ps *current_state = ni_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+					  struct radeon_ps *new_ps,
+					  struct radeon_ps *old_ps)
+{
+	struct ni_ps *new_state = ni_get_ps(new_ps);
+	struct ni_ps *current_state = ni_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+void ni_dpm_setup_asic(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int r;
+
+	r = ni_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
+	ni_read_clock_registers(rdev);
+	btc_read_arb_registers(rdev);
+	rv770_get_memory_type(rdev);
+	if (eg_pi->pcie_performance_request)
+		ni_advertise_gen2_capability(rdev);
+	rv770_get_pcie_gen2_status(rdev);
+	rv770_enable_acpi_pm(rdev);
+}
+
+void ni_update_current_ps(struct radeon_device *rdev,
+			  struct radeon_ps *rps)
+{
+	struct ni_ps *new_ps = ni_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+
+	eg_pi->current_rps = *rps;
+	ni_pi->current_ps = *new_ps;
+	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+void ni_update_requested_ps(struct radeon_device *rdev,
+			    struct radeon_ps *rps)
+{
+	struct ni_ps *new_ps = ni_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+
+	eg_pi->requested_rps = *rps;
+	ni_pi->requested_ps = *new_ps;
+	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
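+/*
+ * Full DPM bring-up: set the clock-gating defaults, build the voltage
+ * and MC register tables, program the static power-management
+ * registers, upload the SMC firmware and its state/SPLL/arbiter/CAC
+ * tables, then start the SMC and turn DPM on.
+ */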
+int ni_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	if (pi->gfx_clock_gating)
+		ni_cg_clockgating_default(rdev);
+	if (btc_dpm_enabled(rdev))
+		return -EINVAL;
+	if (pi->mg_clock_gating)
+		ni_mg_clockgating_default(rdev);
+	if (eg_pi->ls_clock_gating)
+		ni_ls_clockgating_default(rdev);
+	if (pi->voltage_control) {
+		rv770_enable_voltage_control(rdev, true);
+		ret = cypress_construct_voltage_tables(rdev);
+		if (ret) {
+			DRM_ERROR("cypress_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = ni_initialize_mc_reg_table(rdev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		cypress_enable_spread_spectrum(rdev, true);
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, true);
+	rv770_setup_bsp(rdev);
+	rv770_program_git(rdev);
+	rv770_program_tp(rdev);
+	rv770_program_tpp(rdev);
+	rv770_program_sstp(rdev);
+	cypress_enable_display_gap(rdev);
+	rv770_program_vc(rdev);
+	if (pi->dynamic_pcie_gen2)
+		ni_enable_dynamic_pcie_gen2(rdev, true);
+	ret = rv770_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_upload_firmware failed\n");
+		return ret;
+	}
+	ret = ni_process_firmware_header(rdev);
+	if (ret) {
+		DRM_ERROR("ni_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
+	if (ret) {
+		DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = ni_init_smc_table(rdev);
+	if (ret) {
+		DRM_ERROR("ni_init_smc_table failed\n");
+		return ret;
+	}
+	ret = ni_init_smc_spll_table(rdev);
+	if (ret) {
+		DRM_ERROR("ni_init_smc_spll_table failed\n");
+		return ret;
+	}
+	ret = ni_init_arb_table_index(rdev);
+	if (ret) {
+		DRM_ERROR("ni_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = ni_populate_mc_reg_table(rdev, boot_ps);
+		if (ret) {
+			DRM_ERROR("ni_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ni_initialize_smc_cac_tables(rdev);
+	if (ret) {
+		DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
+		return ret;
+	}
+	ret = ni_initialize_hardware_cac_manager(rdev);
+	if (ret) {
+		DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
+		return ret;
+	}
+	ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
+	if (ret) {
+		DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
+		return ret;
+	}
+	ni_program_response_times(rdev);
+	r7xx_start_smc(rdev);
+	ret = cypress_notify_smc_display_change(rdev, false);
+	if (ret) {
+		DRM_ERROR("cypress_notify_smc_display_change failed\n");
+		return ret;
+	}
+	cypress_enable_sclk_control(rdev, true);
+	if (eg_pi->memory_transition)
+		cypress_enable_mclk_control(rdev, true);
+	cypress_start_dpm(rdev);
+	if (pi->gfx_clock_gating)
+		ni_gfx_clockgating_enable(rdev, true);
+	if (pi->mg_clock_gating)
+		ni_mg_clockgating_enable(rdev, true);
+	if (eg_pi->ls_clock_gating)
+		ni_ls_clockgating_enable(rdev, true);
+
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	ni_update_current_ps(rdev, boot_ps);
+
+	return 0;
+}
+
+void ni_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	if (!btc_dpm_enabled(rdev))
+		return;
+	rv770_clear_vc(rdev);
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, false);
+	ni_enable_power_containment(rdev, boot_ps, false);
+	ni_enable_smc_cac(rdev, boot_ps, false);
+	cypress_enable_spread_spectrum(rdev, false);
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	if (pi->dynamic_pcie_gen2)
+		ni_enable_dynamic_pcie_gen2(rdev, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	if (pi->gfx_clock_gating)
+		ni_gfx_clockgating_enable(rdev, false);
+	if (pi->mg_clock_gating)
+		ni_mg_clockgating_enable(rdev, false);
+	if (eg_pi->ls_clock_gating)
+		ni_ls_clockgating_enable(rdev, false);
+	ni_stop_dpm(rdev);
+	btc_reset_to_default(rdev);
+	ni_stop_smc(rdev);
+	ni_force_switch_to_arb_f0(rdev);
+
+	ni_update_current_ps(rdev, boot_ps);
+}
+
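+/* Refresh the TDP limits for the requested state with the SMC halted. */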
+static int ni_power_control_set_level(struct radeon_device *rdev)
+{
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	int ret;
+
+	ret = ni_restrict_performance_levels_before_switch(rdev);
+	if (ret)
+		return ret;
+	ret = rv770_halt_smc(rdev);
+	if (ret)
+		return ret;
+	ret = ni_populate_smc_tdp_limits(rdev, new_ps);
+	if (ret)
+		return ret;
+	ret = rv770_resume_smc(rdev);
+	if (ret)
+		return ret;
+	ret = rv770_set_sw_state(rdev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	ni_update_requested_ps(rdev, new_ps);
+
+	ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
+
+	return 0;
+}
+
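+/*
+ * The actual state switch: power containment and CAC are dropped,
+ * the SMC is halted while the new software state and MC registers
+ * are uploaded, then resumed and told to switch, after which CAC,
+ * power containment and the TDP limits are re-applied.
+ */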
+int ni_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+	struct radeon_ps *old_ps = &eg_pi->current_rps;
+	int ret;
+
+	ret = ni_restrict_performance_levels_before_switch(rdev);
+	if (ret) {
+		DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	ret = ni_enable_power_containment(rdev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("ni_enable_power_containment failed\n");
+		return ret;
+	}
+	ret = ni_enable_smc_cac(rdev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("ni_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = rv770_halt_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_halt_smc failed\n");
+		return ret;
+	}
+	if (eg_pi->smu_uvd_hs)
+		btc_notify_uvd_to_smc(rdev, new_ps);
+	ret = ni_upload_sw_state(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("ni_upload_sw_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = ni_upload_mc_reg_table(rdev, new_ps);
+		if (ret) {
+			DRM_ERROR("ni_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ni_program_memory_timing_parameters(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("ni_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	ret = rv770_resume_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_resume_smc failed\n");
+		return ret;
+	}
+	ret = rv770_set_sw_state(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_set_sw_state failed\n");
+		return ret;
+	}
+	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+	ret = ni_enable_smc_cac(rdev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("ni_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = ni_enable_power_containment(rdev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("ni_enable_power_containment failed\n");
+		return ret;
+	}
+
+	/* update tdp */
+	ret = ni_power_control_set_level(rdev);
+	if (ret) {
+		DRM_ERROR("ni_power_control_set_level failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void ni_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+
+	ni_update_current_ps(rdev, new_ps);
+}
+
+#if 0
+void ni_dpm_reset_asic(struct radeon_device *rdev)
+{
+	ni_restrict_performance_levels_before_switch(rdev);
+	rv770_set_boot_state(rdev);
+}
+#endif
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					  struct radeon_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
+		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
+				      struct radeon_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl = &ps->performance_levels[index];
+
+	ps->performance_level_count = index + 1;
+
+	pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+	pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+	pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+
+	pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
+	pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
+	pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
+
+	/* patch up vddc if necessary */
+	if (pl->vddc == 0xff01) {
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_vddc = pl->vddc;
+		eg_pi->acpi_vddci = pl->vddci;
+		if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+			pi->acpi_pcie_gen2 = true;
+		else
+			pi->acpi_pcie_gen2 = false;
+	}
+
+	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+		eg_pi->ulv.supported = true;
+		eg_pi->ulv.pl = pl;
+	}
+
+	if (pi->min_vddc_in_table > pl->vddc)
+		pi->min_vddc_in_table = pl->vddc;
+
+	if (pi->max_vddc_in_table < pl->vddc)
+		pi->max_vddc_in_table = pl->vddc;
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+		pl->mclk = rdev->clock.default_mclk;
+		pl->sclk = rdev->clock.default_sclk;
+		pl->vddc = vddc;
+		pl->vddci = vddci;
+	}
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+	}
+}
+
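+/*
+ * Walk the ATOM PowerPlay state array: each state carries a
+ * non-clock-info index plus one clock-info index per performance
+ * level, all resolved as offsets from the table header.
+ */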
+static int ni_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	union pplib_clock_info *clock_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	struct ni_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			u8 *idx;
+			ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
+			if (ps == NULL) {
+				/* free the ps_priv allocations from earlier states */
+				while (i--)
+					kfree(rdev->pm.dpm.ps[i].ps_priv);
+				kfree(rdev->pm.dpm.ps);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.ps[i].ps_priv = ps;
+			ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+						      non_clock_info,
+						      power_info->pplib.ucNonClockSize);
+			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (idx[j] * power_info->pplib.ucClockInfoSize));
+				ni_parse_pplib_clock_info(rdev,
+							  &rdev->pm.dpm.ps[i], j,
+							  clock_info);
+			}
+		}
+	}
+	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
+	return 0;
+}
+
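+/*
+ * One-time DPM setup: allocate the stacked ni/evergreen/rv7xx power
+ * info, parse the VBIOS tables, install defaults, and pick a CAC
+ * weight set by PCI device ID (Cayman XT/Pro/LE).
+ */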
+int ni_dpm_init(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct ni_power_info *ni_pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
+	if (ni_pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = ni_pi;
+	eg_pi = &ni_pi->eg;
+	pi = &eg_pi->rv7xx;
+
+	rv770_get_max_vddc(rdev);
+
+	eg_pi->ulv.supported = false;
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = ni_parse_power_table(rdev);
+	if (ret)
+		return ret;
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret)
+		return ret;
+
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kcalloc(4,
+			sizeof(struct radeon_clock_voltage_dependency_entry),
+			GFP_KERNEL);
+	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		r600_free_extended_power_table(rdev);
+		return -ENOMEM;
+	}
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	ni_patch_dependency_tables_based_on_leakage(rdev);
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = dividers.ref_div + 1;
+	else
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+
+	pi->rlp = RV770_RLP_DFLT;
+	pi->rmp = RV770_RMP_DFLT;
+	pi->lhp = RV770_LHP_DFLT;
+	pi->lmp = RV770_LMP_DFLT;
+
+	eg_pi->ats[0].rlp = RV770_RLP_DFLT;
+	eg_pi->ats[0].rmp = RV770_RMP_DFLT;
+	eg_pi->ats[0].lhp = RV770_LHP_DFLT;
+	eg_pi->ats[0].lmp = RV770_LMP_DFLT;
+
+	eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
+	eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
+	eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
+	eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
+
+	eg_pi->smu_uvd_hs = true;
+
+	if (rdev->pdev->device == 0x6707) {
+		pi->mclk_strobe_mode_threshold = 55000;
+		pi->mclk_edc_enable_threshold = 55000;
+		eg_pi->mclk_edc_wr_enable_threshold = 55000;
+	} else {
+		pi->mclk_strobe_mode_threshold = 40000;
+		pi->mclk_edc_enable_threshold = 40000;
+		eg_pi->mclk_edc_wr_enable_threshold = 40000;
+	}
+	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
+
+	pi->mvdd_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
+
+	eg_pi->vddci_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
+
+	rv770_get_engine_memory_ss(rdev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = CYPRESS_VRC_DFLT;
+
+	pi->power_gating = false;
+
+	pi->gfx_clock_gating = true;
+
+	pi->mg_clock_gating = true;
+	pi->mgcgtssm = true;
+	eg_pi->ls_clock_gating = false;
+	eg_pi->sclk_deep_sleep = false;
+
+	pi->dynamic_pcie_gen2 = true;
+
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	pi->display_gap = true;
+
+	pi->dcodt = true;
+
+	pi->ulps = true;
+
+	eg_pi->dynamic_ac_timing = true;
+	eg_pi->abm = true;
+	eg_pi->mcls = true;
+	eg_pi->light_sleep = true;
+	eg_pi->memory_transition = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	eg_pi->dll_default_on = false;
+
+	eg_pi->sclk_deep_sleep = false;
+
+	pi->mclk_stutter_mode_threshold = 0;
+
+	pi->sram_end = SMC_RAM_END;
+
+	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
+	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+	rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
+	rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
+	rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;
+
+	ni_pi->cac_data.leakage_coefficients.at = 516;
+	ni_pi->cac_data.leakage_coefficients.bt = 18;
+	ni_pi->cac_data.leakage_coefficients.av = 51;
+	ni_pi->cac_data.leakage_coefficients.bv = 2957;
+
+	switch (rdev->pdev->device) {
+	case 0x6700:
+	case 0x6701:
+	case 0x6702:
+	case 0x6703:
+	case 0x6718:
+		ni_pi->cac_weights = &cac_weights_cayman_xt;
+		break;
+	case 0x6705:
+	case 0x6719:
+	case 0x671D:
+	case 0x671C:
+	default:
+		ni_pi->cac_weights = &cac_weights_cayman_pro;
+		break;
+	case 0x6704:
+	case 0x6706:
+	case 0x6707:
+	case 0x6708:
+	case 0x6709:
+		ni_pi->cac_weights = &cac_weights_cayman_le;
+		break;
+	}
+
+	if (ni_pi->cac_weights->enable_power_containment_by_default) {
+		ni_pi->enable_power_containment = true;
+		ni_pi->enable_cac = true;
+		ni_pi->enable_sq_ramping = true;
+	} else {
+		ni_pi->enable_power_containment = false;
+		ni_pi->enable_cac = false;
+		ni_pi->enable_sq_ramping = false;
+	}
+
+	ni_pi->driver_calculate_cac_leakage = false;
+	ni_pi->cac_configuration_required = true;
+
+	if (ni_pi->cac_configuration_required) {
+		ni_pi->support_cac_long_term_average = true;
+		ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
+		ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
+	} else {
+		ni_pi->support_cac_long_term_average = false;
+		ni_pi->lta_window_size = 0;
+		ni_pi->lts_truncate = 0;
+	}
+
+	ni_pi->use_power_boost_limit = true;
+
+	/* make sure dc limits are valid */
+	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	return 0;
+}
+
+void ni_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	r600_free_extended_power_table(rdev);
+}
+
+void ni_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *rps)
+{
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	int i;
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->performance_level_count; i++) {
+		pl = &ps->performance_levels[i];
+		if (rdev->family >= CHIP_TAHITI)
+			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+		else
+			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		pl = &ps->performance_levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	}
+}
+
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		return 0;
+	} else {
+		pl = &ps->performance_levels[current_index];
+		return pl->sclk;
+	}
+}
+
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		return 0;
+	} else {
+		pl = &ps->performance_levels[current_index];
+		return pl->mclk;
+	}
+}
+
+u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].sclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].mclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h
new file mode 100644
index 0000000..6bbee91
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __NI_DPM_H__
+#define __NI_DPM_H__
+
+#include "cypress_dpm.h"
+#include "btc_dpm.h"
+#include "nislands_smc.h"
+
+struct ni_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct ni_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
+
+enum ni_dc_cac_level
+{
+	NISLANDS_DCCAC_LEVEL_0 = 0,
+	NISLANDS_DCCAC_LEVEL_1,
+	NISLANDS_DCCAC_LEVEL_2,
+	NISLANDS_DCCAC_LEVEL_3,
+	NISLANDS_DCCAC_LEVEL_4,
+	NISLANDS_DCCAC_LEVEL_5,
+	NISLANDS_DCCAC_LEVEL_6,
+	NISLANDS_DCCAC_LEVEL_7,
+	NISLANDS_DCCAC_MAX_LEVELS
+};
+
+struct ni_leakage_coeffients
+{
+	u32 at;
+	u32 bt;
+	u32 av;
+	u32 bv;
+	s32 t_slope;
+	s32 t_intercept;
+	u32 t_ref;
+};
+
+struct ni_cac_data
+{
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 i_leakage;
+	s32 leakage_minimum_temperature;
+	u32 pwr_const;
+	u32 dc_cac_value;
+	u32 bif_cac_value;
+	u32 lkge_pwr;
+	u8 mc_wr_weight;
+	u8 mc_rd_weight;
+	u8 allow_ovrflw;
+	u8 num_win_tdp;
+	u8 l2num_win_tdp;
+	u8 lts_truncate_n;
+};
+
+struct ni_cac_weights
+{
+	u32 weight_tcp_sig0;
+	u32 weight_tcp_sig1;
+	u32 weight_ta_sig;
+	u32 weight_tcc_en0;
+	u32 weight_tcc_en1;
+	u32 weight_tcc_en2;
+	u32 weight_cb_en0;
+	u32 weight_cb_en1;
+	u32 weight_cb_en2;
+	u32 weight_cb_en3;
+	u32 weight_db_sig0;
+	u32 weight_db_sig1;
+	u32 weight_db_sig2;
+	u32 weight_db_sig3;
+	u32 weight_sxm_sig0;
+	u32 weight_sxm_sig1;
+	u32 weight_sxm_sig2;
+	u32 weight_sxs_sig0;
+	u32 weight_sxs_sig1;
+	u32 weight_xbr_0;
+	u32 weight_xbr_1;
+	u32 weight_xbr_2;
+	u32 weight_spi_sig0;
+	u32 weight_spi_sig1;
+	u32 weight_spi_sig2;
+	u32 weight_spi_sig3;
+	u32 weight_spi_sig4;
+	u32 weight_spi_sig5;
+	u32 weight_lds_sig0;
+	u32 weight_lds_sig1;
+	u32 weight_sc;
+	u32 weight_bif;
+	u32 weight_cp;
+	u32 weight_pa_sig0;
+	u32 weight_pa_sig1;
+	u32 weight_vgt_sig0;
+	u32 weight_vgt_sig1;
+	u32 weight_vgt_sig2;
+	u32 weight_dc_sig0;
+	u32 weight_dc_sig1;
+	u32 weight_dc_sig2;
+	u32 weight_dc_sig3;
+	u32 weight_uvd_sig0;
+	u32 weight_uvd_sig1;
+	u32 weight_spare0;
+	u32 weight_spare1;
+	u32 weight_sq_vsp;
+	u32 weight_sq_vsp0;
+	u32 weight_sq_gpr;
+	u32 ovr_mode_spare_0;
+	u32 ovr_val_spare_0;
+	u32 ovr_mode_spare_1;
+	u32 ovr_val_spare_1;
+	u32 vsp;
+	u32 vsp0;
+	u32 gpr;
+	u8 mc_read_weight;
+	u8 mc_write_weight;
+	u32 tid_cnt;
+	u32 tid_unit;
+	u32 l2_lta_window_size;
+	u32 lts_truncate;
+	u32 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+	u32 pcie_cac[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
+	bool enable_power_containment_by_default;
+};
+
+struct ni_ps {
+	u16 performance_level_count;
+	bool dc_compatible;
+	struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
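+/*
+ * The evergreen_power_info must stay the first member: ni_get_pi(),
+ * evergreen_get_pi() and rv770_get_pi() all return the same
+ * rdev->pm.dpm.priv pointer cast to their respective types.
+ */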
+struct ni_power_info {
+	/* must be first! */
+	struct evergreen_power_info eg;
+	struct ni_clock_registers clock_registers;
+	struct ni_mc_reg_table mc_reg_table;
+	u32 mclk_rtt_mode_threshold;
+	/* flags */
+	bool use_power_boost_limit;
+	bool support_cac_long_term_average;
+	bool cac_enabled;
+	bool cac_configuration_required;
+	bool driver_calculate_cac_leakage;
+	bool pc_enabled;
+	bool enable_power_containment;
+	bool enable_cac;
+	bool enable_sq_ramping;
+	/* smc offsets */
+	u16 arb_table_start;
+	u16 fan_table_start;
+	u16 cac_table_start;
+	u16 spll_table_start;
+	/* CAC stuff */
+	struct ni_cac_data cac_data;
+	u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+	const struct ni_cac_weights *cac_weights;
+	u8 lta_window_size;
+	u8 lts_truncate;
+	struct ni_ps current_ps;
+	struct ni_ps requested_ps;
+	/* scratch structs */
+	SMC_NIslands_MCRegisters smc_mc_reg_table;
+	NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+#define NISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define NISLANDS_ACPI_STATE_ARB_INDEX       1
+#define NISLANDS_ULV_STATE_ARB_INDEX        2
+#define NISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define NISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define NISLANDS_DPM2_NEAR_TDP_DEC          10
+#define NISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define NISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define NISLANDS_DPM2_MAXPS_PERCENT_H                   90
+#define NISLANDS_DPM2_MAXPS_PERCENT_M                   0
+
+#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define NISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define NISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define NISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+				u32 arb_freq_src, u32 arb_freq_dest);
+void ni_update_current_ps(struct radeon_device *rdev,
+			  struct radeon_ps *rps);
+void ni_update_requested_ps(struct radeon_device *rdev,
+			    struct radeon_ps *rps);
+
+void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+					   struct radeon_ps *new_ps,
+					   struct radeon_ps *old_ps);
+void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+					  struct radeon_ps *new_ps,
+					  struct radeon_ps *old_ps);
+
+bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 0000000..827ccc8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL                         0x6840
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x68c4
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL                           0x68d4
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x68f0
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x6960
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x6964
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x6a80
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#define NI_DP_MSE_LINK_TIMING                          0x73a0
+#	define NI_DP_MSE_LINK_FRAME(x)		(((x) & 0x3ff) << 0)
+#	define NI_DP_MSE_LINK_LINE(x)                   (((x) & 0x3) << 16)
+
+#define NI_DP_MSE_MISC_CNTL                            0x736c
+#       define NI_DP_MSE_BLANK_CODE(x)                 (((x) & 0x1) << 0)
+#       define NI_DP_MSE_TIMESTAMP_MODE(x)             (((x) & 0x1) << 4)
+#       define NI_DP_MSE_ZERO_ENCODER(x)               (((x) & 0x1) << 8)
+
+#define NI_DP_MSE_RATE_CNTL                            0x7384
+#       define NI_DP_MSE_RATE_Y(x)                   (((x) & 0x3ffffff) << 0)
+#       define NI_DP_MSE_RATE_X(x)                   (((x) & 0x3f) << 26)
+
+#define NI_DP_MSE_RATE_UPDATE                          0x738c
+
+#define NI_DP_MSE_SAT0                                 0x7390
+#       define NI_DP_MSE_SAT_SRC0(x)                   (((x) & 0x7) << 0)
+#       define NI_DP_MSE_SAT_SLOT_COUNT0(x)            (((x) & 0x3f) << 8)
+#       define NI_DP_MSE_SAT_SRC1(x)                   (((x) & 0x7) << 16)
+#       define NI_DP_MSE_SAT_SLOT_COUNT1(x)            (((x) & 0x3f) << 24)
+
+#define NI_DP_MSE_SAT1                                 0x7394
+
+#define NI_DP_MSE_SAT2                                 0x7398
+
+#define NI_DP_MSE_SAT_UPDATE                           0x739c
+#       define NI_DP_MSE_SAT_UPDATE_MASK               0x3
+#       define NI_DP_MSE_16_MTP_KEEPOUT                0x100
+
+#define NI_DIG_BE_CNTL                                 0x7140
+#       define NI_DIG_FE_SOURCE_SELECT(x)              (((x) & 0x7f) << 8)
+#       define NI_DIG_FE_DIG_MODE(x)                   (((x) & 0x7) << 16)
+#       define NI_DIG_MODE_DP_SST                      0
+#       define NI_DIG_MODE_LVDS                        1
+#       define NI_DIG_MODE_TMDS_DVI                    2
+#       define NI_DIG_MODE_TMDS_HDMI                   3
+#       define NI_DIG_MODE_DP_MST                      5
+#       define NI_DIG_HPD_SELECT(x)                    (((x) & 0x7) << 28)
+
+#define NI_DIG_FE_CNTL                                 0x7000
+#       define NI_DIG_SOURCE_SELECT(x)                 (((x) & 0x3) << 0)
+#       define NI_DIG_STEREOSYNC_SELECT(x)             (((x) & 0x3) << 4)
+#       define NI_DIG_STEREOSYNC_GATE_EN(x)            (((x) & 0x1) << 8)
+#       define NI_DIG_DUAL_LINK_ENABLE(x)              (((x) & 0x1) << 16)
+#       define NI_DIG_SWAP(x)                          (((x) & 0x1) << 18)
+#       define NI_DIG_SYMCLK_FE_ON                     (0x1 << 24)
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 0000000..3c9fec8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,1369 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define CAYMAN_MAX_SH_GPRS           256
+#define CAYMAN_MAX_TEMP_GPRS         16
+#define CAYMAN_MAX_SH_THREADS        256
+#define CAYMAN_MAX_SH_STACK_ENTRIES  4096
+#define CAYMAN_MAX_FRC_EOV_CNT       16384
+#define CAYMAN_MAX_BACKENDS          8
+#define CAYMAN_MAX_BACKENDS_MASK     0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS             16
+#define CAYMAN_MAX_SIMDS_MASK        0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES             8
+#define CAYMAN_MAX_PIPES_MASK        0xFF
+#define CAYMAN_MAX_LDS_NUM           0xFFFF
+#define CAYMAN_MAX_TCC               16
+#define CAYMAN_MAX_TCC_MASK          0xFF
+
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+/* fusion vce clocks */
+#define CG_ECLK_CNTL                                    0x620
+#       define ECLK_DIVIDER_MASK                        0x7f
+#       define ECLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_ECLK_STATUS                                  0x624
+#       define ECLK_STATUS                              (1 << 0)
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	SRBM_GFX_CNTL				        0x0E44
+#define		RINGID(x)					(((x) & 0x3) << 0)
+#define		VMID(x)						(((x) & 0x7) << 0)
+#define	SRBM_STATUS				        0x0E50
+#define		RLC_RQ_PENDING 				(1 << 3)
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		RLC_BUSY 				(1 << 15)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
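+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * a gart-enable style VM_L2_CNTL value built from the bits above, using
+ * identity access mode 1 (translate via the context1 page table):
+ *
+ *	v = ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ *	    ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ *	    ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ *	    EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1);
+ */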
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)			(((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+#define		PROTECTIONS_MASK			(0xf << 0)
+#define		PROTECTIONS_SHIFT			0
+		/* bit 0: range
+		 * bit 2: pde0
+		 * bit 3: valid
+		 * bit 4: read
+		 * bit 5: write
+		 */
+#define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		MEMORY_CLIENT_ID_SHIFT			12
+#define		MEMORY_CLIENT_RW_MASK			(1 << 24)
+#define		MEMORY_CLIENT_RW_SHIFT			24
+#define		FAULT_VMID_MASK				(0x7 << 25)
+#define		FAULT_VMID_SHIFT			25
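+
+/*
+ * Illustrative sketch (editor's addition): decoding a raw
+ * VM_CONTEXT1_PROTECTION_FAULT_STATUS value with the masks and shifts
+ * above; the helper names are hypothetical, not part of the driver.
+ */
+static inline unsigned int ni_vm_fault_client_id(unsigned int status)
+{
+	/* memory client that tripped the fault */
+	return (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+}
+
+static inline unsigned int ni_vm_fault_vmid(unsigned int status)
+{
+	/* VM context the faulting request belonged to */
+	return (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+}
+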
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+#define	FUS_MC_VM_FB_OFFSET				0x2068
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+#define MC_SEQ_MISC0           				0x2a00
+#define		MC_SEQ_MISC0_GDDR5_SHIFT      		28
+#define		MC_SEQ_MISC0_GDDR5_MASK      		0xf0000000
+#define		MC_SEQ_MISC0_GDDR5_VALUE      		5
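+
+/*
+ * Illustrative sketch (editor's addition): the memory-type field of
+ * MC_SEQ_MISC0 reads back MC_SEQ_MISC0_GDDR5_VALUE on GDDR5 boards.
+ * The helper name is hypothetical.
+ */
+static inline int ni_mem_is_gddr5(unsigned int mc_seq_misc0)
+{
+	return ((mc_seq_misc0 & MC_SEQ_MISC0_GDDR5_MASK) >>
+		MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
+}
+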
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3F8C
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+
+#define RLC_GFX_INDEX           			0x3FC4
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_COHER_CNTL2					0x85E8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT				8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0xFFFF0000
+#define		INACTIVE_SIMDS_SHIFT				16
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define VGT_TF_RING_SIZE				0x8988
+#define VGT_OFFCHIP_LDS_BASE				0x89b4
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		GFX_PRIO(x)					((x) << 2)
+#define		CS1_PRIO(x)					((x) << 4)
+#define		CS2_PRIO(x)					((x) << 6)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define		DYN_GPR_ENABLE					(1 << 8)
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define		CRC_SIMD_ID_WADDR_DISABLE			(1 << 8)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x9150
+#define		OVERRIDE				(1 << 21)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
+#define		NUM_LOWER_PIPES(x)			((x) << 30)
+#define		NUM_LOWER_PIPES_MASK			0x40000000
+#define		NUM_LOWER_PIPES_SHIFT			30
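+
+/*
+ * Illustrative sketch (editor's addition): GB_ADDR_CONFIG packs the chip
+ * topology into the fields above; each field is recovered by masking and
+ * shifting, e.g. the pipe count field (hypothetical helper name):
+ */
+static inline unsigned int ni_gb_num_pipes_field(unsigned int gb_addr_config)
+{
+	/* raw encoded field; the driver interprets the encoding per asic */
+	return (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+}
+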
+#define GB_BACKEND_MAP  				0x98FC
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define CP_INT_CNTL                                     0xC124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_DEBUG					0xC1FC
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+/* TN SMU registers */
+#define	TN_CURRENT_GNB_TEMP				0x1F390
+
+/* pm registers */
+#define	SMC_MSG						0x20c
+#define		HOST_SMC_MSG(x)				((x) << 0)
+#define		HOST_SMC_MSG_MASK			(0xff << 0)
+#define		HOST_SMC_MSG_SHIFT			0
+#define		HOST_SMC_RESP(x)			((x) << 8)
+#define		HOST_SMC_RESP_MASK			(0xff << 8)
+#define		HOST_SMC_RESP_SHIFT			8
+#define		SMC_HOST_MSG(x)				((x) << 16)
+#define		SMC_HOST_MSG_MASK			(0xff << 16)
+#define		SMC_HOST_MSG_SHIFT			16
+#define		SMC_HOST_RESP(x)			((x) << 24)
+#define		SMC_HOST_RESP_MASK			(0xff << 24)
+#define		SMC_HOST_RESP_SHIFT			24
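+
+/*
+ * Illustrative sketch (editor's addition): the host/SMC mailbox handshake
+ * built on the fields above: post a message id, then poll the response
+ * field until the SMC acknowledges.  WREG32/RREG32 are the driver's usual
+ * register accessors.
+ *
+ *	WREG32(SMC_MSG, HOST_SMC_MSG(id));
+ *	while (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >>
+ *		HOST_SMC_RESP_SHIFT) == 0)
+ *		udelay(1);
+ */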
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define		SPLL_PDIV_A_SHIFT			20
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_FB_DIV_SHIFT			0
+#define		SPLL_DITHEN				(1 << 28)
+
+#define MPLL_CNTL_MODE                                  0x61c
+#       define SS_SSEN                                  (1 << 24)
+#       define SS_DSMODE_EN                             (1 << 25)
+
+#define	MPLL_AD_FUNC_CNTL				0x624
+#define		CLKF(x)					((x) << 0)
+#define		CLKF_MASK				(0x7f << 0)
+#define		CLKR(x)					((x) << 7)
+#define		CLKR_MASK				(0x1f << 7)
+#define		CLKFRAC(x)				((x) << 12)
+#define		CLKFRAC_MASK				(0x1f << 12)
+#define		YCLK_POST_DIV(x)			((x) << 17)
+#define		YCLK_POST_DIV_MASK			(3 << 17)
+#define		IBIAS(x)				((x) << 20)
+#define		IBIAS_MASK				(0x3ff << 20)
+#define		RESET					(1 << 30)
+#define		PDNB					(1 << 31)
+#define	MPLL_AD_FUNC_CNTL_2				0x628
+#define		BYPASS					(1 << 19)
+#define		BIAS_GEN_PDNB				(1 << 24)
+#define		RESET_EN				(1 << 25)
+#define		VCO_MODE				(1 << 29)
+#define	MPLL_DQ_FUNC_CNTL				0x62c
+#define	MPLL_DQ_FUNC_CNTL_2				0x630
+
+#define GENERAL_PWRMGT                                  0x63c
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define ENABLE_GEN2PCIE                          (1 << 4)
+#       define ENABLE_GEN2XSP                           (1 << 5)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (3 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define LOW_VOLT_D2_ACPI                         (1 << 8)
+#       define LOW_VOLT_D3_ACPI                         (1 << 9)
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define BACKBIAS_PAD_EN                          (1 << 18)
+#       define BACKBIAS_VALUE                           (1 << 19)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#       define AC_DC_SW                                 (1 << 24)
+
+#define SCLK_PWRMGT_CNTL                                  0x644
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+#define	MCLK_PWRMGT_CNTL				0x648
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define MPLL_PWRMGT_OFF                          (1 << 5)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCKA0_PDNB                             (1 << 8)
+#       define MRDCKA1_PDNB                             (1 << 9)
+#       define MRDCKB0_PDNB                             (1 << 10)
+#       define MRDCKB1_PDNB                             (1 << 11)
+#       define MRDCKC0_PDNB                             (1 << 12)
+#       define MRDCKC1_PDNB                             (1 << 13)
+#       define MRDCKD0_PDNB                             (1 << 14)
+#       define MRDCKD1_PDNB                             (1 << 15)
+#       define MRDCKA0_RESET                            (1 << 16)
+#       define MRDCKA1_RESET                            (1 << 17)
+#       define MRDCKB0_RESET                            (1 << 18)
+#       define MRDCKB1_RESET                            (1 << 19)
+#       define MRDCKC0_RESET                            (1 << 20)
+#       define MRDCKC1_RESET                            (1 << 21)
+#       define MRDCKD0_RESET                            (1 << 22)
+#       define MRDCKD1_RESET                            (1 << 23)
+#       define DLL_READY_READ                           (1 << 24)
+#       define USE_DISPLAY_GAP                          (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                (1 << 26)
+#       define MPLL_TURNOFF_D2                          (1 << 28)
+#define	DLL_CNTL					0x64c
+#       define MRDCKA0_BYPASS                           (1 << 24)
+#       define MRDCKA1_BYPASS                           (1 << 25)
+#       define MRDCKB0_BYPASS                           (1 << 26)
+#       define MRDCKB1_BYPASS                           (1 << 27)
+#       define MRDCKC0_BYPASS                           (1 << 28)
+#       define MRDCKC1_BYPASS                           (1 << 29)
+#       define MRDCKD0_BYPASS                           (1 << 30)
+#       define MRDCKD1_BYPASS                           (1 << 31)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x66c
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_AT                                           0x6d4
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+
+#define	CG_BIF_REQ_AND_RSP				0x7f4
+#define		CG_CLIENT_REQ(x)			((x) << 0)
+#define		CG_CLIENT_REQ_MASK			(0xff << 0)
+#define		CG_CLIENT_REQ_SHIFT			0
+#define		CG_CLIENT_RESP(x)			((x) << 8)
+#define		CG_CLIENT_RESP_MASK			(0xff << 8)
+#define		CG_CLIENT_RESP_SHIFT			8
+#define		CLIENT_CG_REQ(x)			((x) << 16)
+#define		CLIENT_CG_REQ_MASK			(0xff << 16)
+#define		CLIENT_CG_REQ_SHIFT			16
+#define		CLIENT_CG_RESP(x)			((x) << 24)
+#define		CLIENT_CG_RESP_MASK			(0xff << 24)
+#define		CLIENT_CG_RESP_SHIFT			24
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x790
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define		CLK_S_SHIFT				4
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+#define		CLK_V_SHIFT				0
+
+#define SMC_SCRATCH0                                    0x81c
+
+#define	CG_SPLL_FUNC_CNTL_4				0x850
+
+#define	MPLL_SS1					0x85c
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0x860
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	CG_CAC_CTRL					0x88c
+#define		TID_CNT(x)				((x) << 0)
+#define		TID_CNT_MASK				(0x3fff << 0)
+#define		TID_UNIT(x)				((x) << 14)
+#define		TID_UNIT_MASK				(0xf << 14)
+
+#define	CG_IND_ADDR					0x8f8
+#define	CG_IND_DATA					0x8fc
+/* CGIND regs */
+#define	CG_CGTT_LOCAL_0					0x00
+#define	CG_CGTT_LOCAL_1					0x01
+
+#define MC_CG_CONFIG                                    0x25bc
+#define         MCDW_WR_ENABLE                          (1 << 0)
+#define         MCDX_WR_ENABLE                          (1 << 1)
+#define         MCDY_WR_ENABLE                          (1 << 2)
+#define         MCDZ_WR_ENABLE                          (1 << 3)
+#define		MC_RD_ENABLE(x)				((x) << 4)
+#define		MC_RD_ENABLE_MASK			(3 << 4)
+#define		INDEX(x)				((x) << 6)
+#define		INDEX_MASK				(0xfff << 6)
+#define		INDEX_SHIFT				6
+
+#define	MC_ARB_CAC_CNTL					0x2750
+#define         ENABLE                                  (1 << 0)
+#define		READ_WEIGHT(x)				((x) << 1)
+#define		READ_WEIGHT_MASK			(0x3f << 1)
+#define		READ_WEIGHT_SHIFT			1
+#define		WRITE_WEIGHT(x)				((x) << 7)
+#define		WRITE_WEIGHT_MASK			(0x3f << 7)
+#define		WRITE_WEIGHT_SHIFT			7
+#define         ALLOW_OVERFLOW                          (1 << 13)
+
+#define	MC_ARB_DRAM_TIMING				0x2774
+#define	MC_ARB_DRAM_TIMING2				0x2778
+
+#define	MC_ARB_RFSH_RATE				0x27b0
+#define		POWERMODE0(x)				((x) << 0)
+#define		POWERMODE0_MASK				(0xff << 0)
+#define		POWERMODE0_SHIFT			0
+#define		POWERMODE1(x)				((x) << 8)
+#define		POWERMODE1_MASK				(0xff << 8)
+#define		POWERMODE1_SHIFT			8
+#define		POWERMODE2(x)				((x) << 16)
+#define		POWERMODE2_MASK				(0xff << 16)
+#define		POWERMODE2_SHIFT			16
+#define		POWERMODE3(x)				((x) << 24)
+#define		POWERMODE3_MASK				(0xff << 24)
+#define		POWERMODE3_SHIFT			24
+
+#define MC_ARB_CG                                       0x27e8
+#define		CG_ARB_REQ(x)				((x) << 0)
+#define		CG_ARB_REQ_MASK				(0xff << 0)
+#define		CG_ARB_REQ_SHIFT			0
+#define		CG_ARB_RESP(x)				((x) << 8)
+#define		CG_ARB_RESP_MASK			(0xff << 8)
+#define		CG_ARB_RESP_SHIFT			8
+#define		ARB_CG_REQ(x)				((x) << 16)
+#define		ARB_CG_REQ_MASK				(0xff << 16)
+#define		ARB_CG_REQ_SHIFT			16
+#define		ARB_CG_RESP(x)				((x) << 24)
+#define		ARB_CG_RESP_MASK			(0xff << 24)
+#define		ARB_CG_RESP_SHIFT			24
+
+#define	MC_ARB_DRAM_TIMING_1				0x27f0
+#define	MC_ARB_DRAM_TIMING_2				0x27f4
+#define	MC_ARB_DRAM_TIMING_3				0x27f8
+#define	MC_ARB_DRAM_TIMING2_1				0x27fc
+#define	MC_ARB_DRAM_TIMING2_2				0x2800
+#define	MC_ARB_DRAM_TIMING2_3				0x2804
+#define MC_ARB_BURST_TIME                               0x2808
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define MC_CG_DATAPORT                                  0x2884
+
+#define MC_SEQ_RAS_TIMING                               0x28a0
+#define MC_SEQ_CAS_TIMING                               0x28a4
+#define MC_SEQ_MISC_TIMING                              0x28a8
+#define MC_SEQ_MISC_TIMING2                             0x28ac
+#define MC_SEQ_PMG_TIMING                               0x28b0
+#define MC_SEQ_RD_CTL_D0                                0x28b4
+#define MC_SEQ_RD_CTL_D1                                0x28b8
+#define MC_SEQ_WR_CTL_D0                                0x28bc
+#define MC_SEQ_WR_CTL_D1                                0x28c0
+
+#define MC_SEQ_MISC0                                    0x2a00
+#define         MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define         MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define         MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0x2a04
+#define MC_SEQ_RESERVE_M                                0x2a08
+#define MC_PMG_CMD_EMRS                                 0x2a0c
+
+#define MC_SEQ_MISC3                                    0x2a2c
+
+#define MC_SEQ_MISC5                                    0x2a54
+#define MC_SEQ_MISC6                                    0x2a58
+
+#define MC_SEQ_MISC7                                    0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP                            0x2a6c
+#define MC_SEQ_CAS_TIMING_LP                            0x2a70
+#define MC_SEQ_MISC_TIMING_LP                           0x2a74
+#define MC_SEQ_MISC_TIMING2_LP                          0x2a78
+#define MC_SEQ_WR_CTL_D0_LP                             0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP                             0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP                           0x2a88
+
+#define MC_PMG_CMD_MRS                                  0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP                             0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP                             0x2b20
+
+#define MC_PMG_CMD_MRS1                                 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0x2b48
+#define MC_SEQ_PMG_TIMING_LP                            0x2b4c
+
+#define MC_PMG_CMD_MRS2                                 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0x2b60
+
+#define AUX_CONTROL					0x6200
+#define 	AUX_EN					(1 << 0)
+#define 	AUX_LS_READ_EN				(1 << 8)
+#define 	AUX_LS_UPDATE_DISABLE(x)		(((x) & 0x1) << 12)
+#define 	AUX_HPD_DISCON(x)			(((x) & 0x1) << 16)
+#define 	AUX_DET_EN				(1 << 18)
+#define 	AUX_HPD_SEL(x)				(((x) & 0x7) << 20)
+#define 	AUX_IMPCAL_REQ_EN			(1 << 24)
+#define 	AUX_TEST_MODE				(1 << 28)
+#define 	AUX_DEGLITCH_EN				(1 << 29)
+#define AUX_SW_CONTROL					0x6204
+#define 	AUX_SW_GO				(1 << 0)
+#define 	AUX_LS_READ_TRIG			(1 << 2)
+#define 	AUX_SW_START_DELAY(x)			(((x) & 0xf) << 4)
+#define 	AUX_SW_WR_BYTES(x)			(((x) & 0x1f) << 16)
+
+#define AUX_SW_INTERRUPT_CONTROL			0x620c
+#define 	AUX_SW_DONE_INT				(1 << 0)
+#define 	AUX_SW_DONE_ACK				(1 << 1)
+#define 	AUX_SW_DONE_MASK			(1 << 2)
+#define 	AUX_SW_LS_DONE_INT			(1 << 4)
+#define 	AUX_SW_LS_DONE_MASK			(1 << 6)
+#define AUX_SW_STATUS					0x6210
+#define 	AUX_SW_DONE				(1 << 0)
+#define 	AUX_SW_REQ				(1 << 1)
+#define 	AUX_SW_RX_TIMEOUT_STATE(x)		(((x) & 0x7) << 4)
+#define 	AUX_SW_RX_TIMEOUT			(1 << 7)
+#define 	AUX_SW_RX_OVERFLOW			(1 << 8)
+#define 	AUX_SW_RX_HPD_DISCON			(1 << 9)
+#define 	AUX_SW_RX_PARTIAL_BYTE			(1 << 10)
+#define 	AUX_SW_NON_AUX_MODE			(1 << 11)
+#define 	AUX_SW_RX_MIN_COUNT_VIOL		(1 << 12)
+#define 	AUX_SW_RX_INVALID_STOP			(1 << 14)
+#define 	AUX_SW_RX_SYNC_INVALID_L		(1 << 17)
+#define 	AUX_SW_RX_SYNC_INVALID_H		(1 << 18)
+#define 	AUX_SW_RX_INVALID_START			(1 << 19)
+#define 	AUX_SW_RX_RECV_NO_DET			(1 << 20)
+#define 	AUX_SW_RX_RECV_INVALID_H		(1 << 22)
+#define 	AUX_SW_RX_RECV_INVALID_V		(1 << 23)
+
+#define AUX_SW_DATA					0x6218
+#define AUX_SW_DATA_RW					(1 << 0)
+#define AUX_SW_DATA_MASK(x)				(((x) & 0xff) << 8)
+#define AUX_SW_DATA_INDEX(x)				(((x) & 0x1f) << 16)
+#define AUX_SW_AUTOINCREMENT_DISABLE			(1 << 31)
+
+#define	LB_SYNC_RESET_SEL				0x6b28
+#define		LB_SYNC_RESET_SEL_MASK			(3 << 0)
+#define		LB_SYNC_RESET_SEL_SHIFT			0
+
+#define	DC_STUTTER_CNTL					0x6b30
+#define		DC_STUTTER_ENABLE_A			(1 << 0)
+#define		DC_STUTTER_ENABLE_B			(1 << 1)
+
+#define SQ_CAC_THRESHOLD                                0x8e4c
+#define		VSP(x)					((x) << 0)
+#define		VSP_MASK				(0xff << 0)
+#define		VSP_SHIFT				0
+#define		VSP0(x)					((x) << 8)
+#define		VSP0_MASK				(0xff << 8)
+#define		VSP0_SHIFT				8
+#define		GPR(x)					((x) << 16)
+#define		GPR_MASK				(0xff << 16)
+#define		GPR_SHIFT				16
+
+#define SQ_POWER_THROTTLE                               0x8e58
+#define		MIN_POWER(x)				((x) << 0)
+#define		MIN_POWER_MASK				(0x3fff << 0)
+#define		MIN_POWER_SHIFT				0
+#define		MAX_POWER(x)				((x) << 16)
+#define		MAX_POWER_MASK				(0x3fff << 16)
+#define		MAX_POWER_SHIFT				16
+#define SQ_POWER_THROTTLE2                              0x8e5c
+#define		MAX_POWER_DELTA(x)			((x) << 0)
+#define		MAX_POWER_DELTA_MASK			(0x3fff << 0)
+#define		MAX_POWER_DELTA_SHIFT			0
+#define		STI_SIZE(x)				((x) << 16)
+#define		STI_SIZE_MASK				(0x3ff << 16)
+#define		STI_SIZE_SHIFT				16
+#define		LTI_RATIO(x)				((x) << 27)
+#define		LTI_RATIO_MASK				(0xf << 27)
+#define		LTI_RATIO_SHIFT				27
+
+/* CG indirect registers */
+#define CG_CAC_REGION_1_WEIGHT_0                        0x83
+#define		WEIGHT_TCP_SIG0(x)			((x) << 0)
+#define		WEIGHT_TCP_SIG0_MASK			(0x3f << 0)
+#define		WEIGHT_TCP_SIG0_SHIFT			0
+#define		WEIGHT_TCP_SIG1(x)			((x) << 6)
+#define		WEIGHT_TCP_SIG1_MASK			(0x3f << 6)
+#define		WEIGHT_TCP_SIG1_SHIFT			6
+#define		WEIGHT_TA_SIG(x)			((x) << 12)
+#define		WEIGHT_TA_SIG_MASK			(0x3f << 12)
+#define		WEIGHT_TA_SIG_SHIFT			12
+#define CG_CAC_REGION_1_WEIGHT_1                        0x84
+#define		WEIGHT_TCC_EN0(x)			((x) << 0)
+#define		WEIGHT_TCC_EN0_MASK			(0x3f << 0)
+#define		WEIGHT_TCC_EN0_SHIFT			0
+#define		WEIGHT_TCC_EN1(x)			((x) << 6)
+#define		WEIGHT_TCC_EN1_MASK			(0x3f << 6)
+#define		WEIGHT_TCC_EN1_SHIFT			6
+#define		WEIGHT_TCC_EN2(x)			((x) << 12)
+#define		WEIGHT_TCC_EN2_MASK			(0x3f << 12)
+#define		WEIGHT_TCC_EN2_SHIFT			12
+#define		WEIGHT_TCC_EN3(x)			((x) << 18)
+#define		WEIGHT_TCC_EN3_MASK			(0x3f << 18)
+#define		WEIGHT_TCC_EN3_SHIFT			18
+#define CG_CAC_REGION_2_WEIGHT_0                        0x85
+#define		WEIGHT_CB_EN0(x)			((x) << 0)
+#define		WEIGHT_CB_EN0_MASK			(0x3f << 0)
+#define		WEIGHT_CB_EN0_SHIFT			0
+#define		WEIGHT_CB_EN1(x)			((x) << 6)
+#define		WEIGHT_CB_EN1_MASK			(0x3f << 6)
+#define		WEIGHT_CB_EN1_SHIFT			6
+#define		WEIGHT_CB_EN2(x)			((x) << 12)
+#define		WEIGHT_CB_EN2_MASK			(0x3f << 12)
+#define		WEIGHT_CB_EN2_SHIFT			12
+#define		WEIGHT_CB_EN3(x)			((x) << 18)
+#define		WEIGHT_CB_EN3_MASK			(0x3f << 18)
+#define		WEIGHT_CB_EN3_SHIFT			18
+#define CG_CAC_REGION_2_WEIGHT_1                        0x86
+#define		WEIGHT_DB_SIG0(x)			((x) << 0)
+#define		WEIGHT_DB_SIG0_MASK			(0x3f << 0)
+#define		WEIGHT_DB_SIG0_SHIFT			0
+#define		WEIGHT_DB_SIG1(x)			((x) << 6)
+#define		WEIGHT_DB_SIG1_MASK			(0x3f << 6)
+#define		WEIGHT_DB_SIG1_SHIFT			6
+#define		WEIGHT_DB_SIG2(x)			((x) << 12)
+#define		WEIGHT_DB_SIG2_MASK			(0x3f << 12)
+#define		WEIGHT_DB_SIG2_SHIFT			12
+#define		WEIGHT_DB_SIG3(x)			((x) << 18)
+#define		WEIGHT_DB_SIG3_MASK			(0x3f << 18)
+#define		WEIGHT_DB_SIG3_SHIFT			18
+#define CG_CAC_REGION_2_WEIGHT_2                        0x87
+#define		WEIGHT_SXM_SIG0(x)			((x) << 0)
+#define		WEIGHT_SXM_SIG0_MASK			(0x3f << 0)
+#define		WEIGHT_SXM_SIG0_SHIFT			0
+#define		WEIGHT_SXM_SIG1(x)			((x) << 6)
+#define		WEIGHT_SXM_SIG1_MASK			(0x3f << 6)
+#define		WEIGHT_SXM_SIG1_SHIFT			6
+#define		WEIGHT_SXM_SIG2(x)			((x) << 12)
+#define		WEIGHT_SXM_SIG2_MASK			(0x3f << 12)
+#define		WEIGHT_SXM_SIG2_SHIFT			12
+#define		WEIGHT_SXS_SIG0(x)			((x) << 18)
+#define		WEIGHT_SXS_SIG0_MASK			(0x3f << 18)
+#define		WEIGHT_SXS_SIG0_SHIFT			18
+#define		WEIGHT_SXS_SIG1(x)			((x) << 24)
+#define		WEIGHT_SXS_SIG1_MASK			(0x3f << 24)
+#define		WEIGHT_SXS_SIG1_SHIFT			24
+#define CG_CAC_REGION_3_WEIGHT_0                        0x88
+#define		WEIGHT_XBR_0(x)				((x) << 0)
+#define		WEIGHT_XBR_0_MASK			(0x3f << 0)
+#define		WEIGHT_XBR_0_SHIFT			0
+#define		WEIGHT_XBR_1(x)				((x) << 6)
+#define		WEIGHT_XBR_1_MASK			(0x3f << 6)
+#define		WEIGHT_XBR_1_SHIFT			6
+#define		WEIGHT_XBR_2(x)				((x) << 12)
+#define		WEIGHT_XBR_2_MASK			(0x3f << 12)
+#define		WEIGHT_XBR_2_SHIFT			12
+#define		WEIGHT_SPI_SIG0(x)			((x) << 18)
+#define		WEIGHT_SPI_SIG0_MASK			(0x3f << 18)
+#define		WEIGHT_SPI_SIG0_SHIFT			18
+#define CG_CAC_REGION_3_WEIGHT_1                        0x89
+#define		WEIGHT_SPI_SIG1(x)			((x) << 0)
+#define		WEIGHT_SPI_SIG1_MASK			(0x3f << 0)
+#define		WEIGHT_SPI_SIG1_SHIFT			0
+#define		WEIGHT_SPI_SIG2(x)			((x) << 6)
+#define		WEIGHT_SPI_SIG2_MASK			(0x3f << 6)
+#define		WEIGHT_SPI_SIG2_SHIFT			6
+#define		WEIGHT_SPI_SIG3(x)			((x) << 12)
+#define		WEIGHT_SPI_SIG3_MASK			(0x3f << 12)
+#define		WEIGHT_SPI_SIG3_SHIFT			12
+#define		WEIGHT_SPI_SIG4(x)			((x) << 18)
+#define		WEIGHT_SPI_SIG4_MASK			(0x3f << 18)
+#define		WEIGHT_SPI_SIG4_SHIFT			18
+#define		WEIGHT_SPI_SIG5(x)			((x) << 24)
+#define		WEIGHT_SPI_SIG5_MASK			(0x3f << 24)
+#define		WEIGHT_SPI_SIG5_SHIFT			24
+#define CG_CAC_REGION_4_WEIGHT_0                        0x8a
+#define		WEIGHT_LDS_SIG0(x)			((x) << 0)
+#define		WEIGHT_LDS_SIG0_MASK			(0x3f << 0)
+#define		WEIGHT_LDS_SIG0_SHIFT			0
+#define		WEIGHT_LDS_SIG1(x)			((x) << 6)
+#define		WEIGHT_LDS_SIG1_MASK			(0x3f << 6)
+#define		WEIGHT_LDS_SIG1_SHIFT			6
+#define		WEIGHT_SC(x)				((x) << 24)
+#define		WEIGHT_SC_MASK				(0x3f << 24)
+#define		WEIGHT_SC_SHIFT				24
+#define CG_CAC_REGION_4_WEIGHT_1                        0x8b
+#define		WEIGHT_BIF(x)				((x) << 0)
+#define		WEIGHT_BIF_MASK				(0x3f << 0)
+#define		WEIGHT_BIF_SHIFT			0
+#define		WEIGHT_CP(x)				((x) << 6)
+#define		WEIGHT_CP_MASK				(0x3f << 6)
+#define		WEIGHT_CP_SHIFT				6
+#define		WEIGHT_PA_SIG0(x)			((x) << 12)
+#define		WEIGHT_PA_SIG0_MASK			(0x3f << 12)
+#define		WEIGHT_PA_SIG0_SHIFT			12
+#define		WEIGHT_PA_SIG1(x)			((x) << 18)
+#define		WEIGHT_PA_SIG1_MASK			(0x3f << 18)
+#define		WEIGHT_PA_SIG1_SHIFT			18
+#define		WEIGHT_VGT_SIG0(x)			((x) << 24)
+#define		WEIGHT_VGT_SIG0_MASK			(0x3f << 24)
+#define		WEIGHT_VGT_SIG0_SHIFT			24
+#define CG_CAC_REGION_4_WEIGHT_2                        0x8c
+#define		WEIGHT_VGT_SIG1(x)			((x) << 0)
+#define		WEIGHT_VGT_SIG1_MASK			(0x3f << 0)
+#define		WEIGHT_VGT_SIG1_SHIFT			0
+#define		WEIGHT_VGT_SIG2(x)			((x) << 6)
+#define		WEIGHT_VGT_SIG2_MASK			(0x3f << 6)
+#define		WEIGHT_VGT_SIG2_SHIFT			6
+#define		WEIGHT_DC_SIG0(x)			((x) << 12)
+#define		WEIGHT_DC_SIG0_MASK			(0x3f << 12)
+#define		WEIGHT_DC_SIG0_SHIFT			12
+#define		WEIGHT_DC_SIG1(x)			((x) << 18)
+#define		WEIGHT_DC_SIG1_MASK			(0x3f << 18)
+#define		WEIGHT_DC_SIG1_SHIFT			18
+#define		WEIGHT_DC_SIG2(x)			((x) << 24)
+#define		WEIGHT_DC_SIG2_MASK			(0x3f << 24)
+#define		WEIGHT_DC_SIG2_SHIFT			24
+#define CG_CAC_REGION_4_WEIGHT_3                        0x8d
+#define		WEIGHT_DC_SIG3(x)			((x) << 0)
+#define		WEIGHT_DC_SIG3_MASK			(0x3f << 0)
+#define		WEIGHT_DC_SIG3_SHIFT			0
+#define		WEIGHT_UVD_SIG0(x)			((x) << 6)
+#define		WEIGHT_UVD_SIG0_MASK			(0x3f << 6)
+#define		WEIGHT_UVD_SIG0_SHIFT			6
+#define		WEIGHT_UVD_SIG1(x)			((x) << 12)
+#define		WEIGHT_UVD_SIG1_MASK			(0x3f << 12)
+#define		WEIGHT_UVD_SIG1_SHIFT			12
+#define		WEIGHT_SPARE0(x)			((x) << 18)
+#define		WEIGHT_SPARE0_MASK			(0x3f << 18)
+#define		WEIGHT_SPARE0_SHIFT			18
+#define		WEIGHT_SPARE1(x)			((x) << 24)
+#define		WEIGHT_SPARE1_MASK			(0x3f << 24)
+#define		WEIGHT_SPARE1_SHIFT			24
+#define CG_CAC_REGION_5_WEIGHT_0                        0x8e
+#define		WEIGHT_SQ_VSP(x)			((x) << 0)
+#define		WEIGHT_SQ_VSP_MASK			(0x3fff << 0)
+#define		WEIGHT_SQ_VSP_SHIFT			0
+#define		WEIGHT_SQ_VSP0(x)			((x) << 14)
+#define		WEIGHT_SQ_VSP0_MASK			(0x3fff << 14)
+#define		WEIGHT_SQ_VSP0_SHIFT			14
+#define CG_CAC_REGION_4_OVERRIDE_4                      0xab
+#define		OVR_MODE_SPARE_0(x)			((x) << 16)
+#define		OVR_MODE_SPARE_0_MASK			(0x1 << 16)
+#define		OVR_MODE_SPARE_0_SHIFT			16
+#define		OVR_VAL_SPARE_0(x)			((x) << 17)
+#define		OVR_VAL_SPARE_0_MASK			(0x1 << 17)
+#define		OVR_VAL_SPARE_0_SHIFT			17
+#define		OVR_MODE_SPARE_1(x)			((x) << 18)
+#define		OVR_MODE_SPARE_1_MASK			(0x1 << 18)
+#define		OVR_MODE_SPARE_1_SHIFT			18
+#define		OVR_VAL_SPARE_1(x)			((x) << 19)
+#define		OVR_VAL_SPARE_1_MASK			(0x1 << 19)
+#define		OVR_VAL_SPARE_1_SHIFT			19
+#define CG_CAC_REGION_5_WEIGHT_1                        0xb7
+#define		WEIGHT_SQ_GPR(x)			((x) << 0)
+#define		WEIGHT_SQ_GPR_MASK			(0x3fff << 0)
+#define		WEIGHT_SQ_GPR_SHIFT			0
+#define		WEIGHT_SQ_LDS(x)			((x) << 14)
+#define		WEIGHT_SQ_LDS_MASK			(0x3fff << 14)
+#define		WEIGHT_SQ_LDS_SHIFT			14
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     8
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW				0xEF00
+#define UVD_SEMA_ADDR_HIGH				0xEF04
+#define UVD_SEMA_CMD					0xEF08
+#define UVD_UDEC_ADDR_CONFIG				0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
+#define UVD_RBC_RB_RPTR					0xF690
+#define UVD_RBC_RB_WPTR					0xF694
+#define UVD_STATUS					0xf6bc
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
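+
+/*
+ * Illustrative sketch (editor's addition): a PM4 type-3 header carries the
+ * opcode in bits 8..15 and the payload dword count minus one in bits
+ * 16..29.  A SET_CONFIG_REG write of a single register has two payload
+ * dwords (offset, value), so its header is built as:
+ *
+ *	header = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ */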
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DEALLOC_STATE				0x14
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+		 * 1 - pfp
+		 */
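+/*
+ * Illustrative sketch (editor's addition): the control dword of a
+ * WAIT_REG_MEM packet combines the three fields above.  "On the ME,
+ * wait until a memory location equals the reference value" would be:
+ *
+ *	cntl = WAIT_REG_MEM_FUNCTION(3) |	(3: equal)
+ *	       WAIT_REG_MEM_MEM_SPACE(1) |	(1: memory)
+ *	       WAIT_REG_MEM_ENGINE(0);		(0: me)
+ */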
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#              define PACKET3_ENGINE_ME            (1 << 31)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
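+/*
+ * Illustrative sketch (editor's addition): a fence via EVENT_WRITE_EOP
+ * typically uses a TS event (EVENT_INDEX 5), writes the low 32 bits of
+ * data when the event retires and interrupts once the write is confirmed:
+ *
+ *	event = EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5);
+ *	ctl   = DATA_SEL(1) | INT_SEL(2);
+ */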
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE		        0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE		        0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT			        0x75
+#define	PACKET3_ME_WRITE				0x7A
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
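+
+/*
+ * Illustrative sketch (editor's addition): DMA_RB_SIZE takes the log2 of
+ * the ring size in dwords, so a 64K-dword (256KB) ring is enabled with:
+ *
+ *	rb_cntl = DMA_RB_ENABLE | DMA_RB_SIZE(16);
+ */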
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG                                 0xd0b8
+#define DMA_MODE                                          0xd0bc
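+
+/* Illustrative sketch (not part of the original header): the DMA register
+ * block above is instanced twice, so accesses are written against the
+ * per-instance offsets, e.g.:
+ *
+ *   WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, DMA_RB_ENABLE);  -> 0xd000
+ *   WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, DMA_RB_ENABLE);  -> 0xd800
+ */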
+
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_SRBM_POLL_PACKET		((9 << 28) |			\
+					 (1 << 27) |			\
+					 (1 << 26))
+
+#define DMA_SRBM_READ_PACKET		((9 << 28) |			\
+					 (1 << 27))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
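+
+/* Illustrative example (not part of the original header): composing a ring
+ * word with the macros above, e.g. a constant fill of 16 dwords:
+ *
+ *   u32 header = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, 0, 0, 16);
+ *   ... header == (0xd << 28) | 16 == 0xd0000010
+ */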
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
new file mode 100644
index 0000000..3cf8fc0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __NISLANDS_SMC_H__
+#define __NISLANDS_SMC_H__
+
+#pragma pack(push, 1)
+
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+    uint8_t     MaxPS;
+    uint8_t     TgtAct;
+    uint8_t     MaxPS_StepInc;
+    uint8_t     MaxPS_StepDec;
+    uint8_t     PSST;
+    uint8_t     NearTDPDec;
+    uint8_t     AboveSafeInc;
+    uint8_t     BelowSafeInc;
+    uint8_t     PSDeltaLimit;
+    uint8_t     PSDeltaWin;
+    uint8_t     Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_FUNC_CNTL_4;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL_1;
+    uint32_t        vMPLL_FUNC_CNTL_2;
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     arbValue;
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     reserved1;
+    uint8_t                     reserved2;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    NISLANDS_SMC_SCLK_VALUE     sclk;
+    NISLANDS_SMC_MCLK_VALUE     mclk;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint32_t                    powergate_en;
+    uint8_t                     hUp;
+    uint8_t                     hDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    reserved[2];
+    PP_NIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define NISLANDS_SMC_STROBE_RATIO    0x0F
+#define NISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define NISLANDS_SMC_MC_STUTTER_EN   0x08
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
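+
+/* Note (illustrative, assuming the usual trailing-array idiom): levels[1] is
+ * a variable-length tail, so a swstate with n levels would be sized as
+ *
+ *   sizeof(NISLANDS_SMC_SWSTATE) +
+ *       (n - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL)
+ */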
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    PP_NIslands_DPM2Parameters          dpm2Params;
+    NISLANDS_SMC_SWSTATE                initialState;
+    NISLANDS_SMC_SWSTATE                ACPIState;
+    NISLANDS_SMC_SWSTATE                ULVState;
+    NISLANDS_SMC_SWSTATE                driverState;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+#define NI_SMC_SOFT_REGISTERS_START        0x108
+
+#define NI_SMC_SOFT_REGISTER_mclk_chg_timeout        0x0
+#define NI_SMC_SOFT_REGISTER_delay_bbias             0xC
+#define NI_SMC_SOFT_REGISTER_delay_vreg              0x10
+#define NI_SMC_SOFT_REGISTER_delay_acpi              0x2C
+#define NI_SMC_SOFT_REGISTER_seq_index               0x64
+#define NI_SMC_SOFT_REGISTER_mvdd_chg_time           0x68
+#define NI_SMC_SOFT_REGISTER_mclk_switch_lim         0x78
+#define NI_SMC_SOFT_REGISTER_watermark_threshold     0x80
+#define NI_SMC_SOFT_REGISTER_mc_block_delay          0x84
+#define NI_SMC_SOFT_REGISTER_uvd_enabled             0x98
+
+#define SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES 16
+#define SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
+
+struct SMC_NISLANDS_MC_TPP_CAC_TABLE
+{
+    uint32_t    tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
+    uint32_t    cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
+};
+
+typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
+
+
+struct PP_NIslands_CACTABLES
+{
+    uint32_t                cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
+    uint32_t                cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+
+    uint32_t                pwr_const;
+
+    uint32_t                dc_cacValue;
+    uint32_t                bif_cacValue;
+    uint32_t                lkge_pwr;
+
+    uint8_t                 cac_width;
+    uint8_t                 window_size_p2;
+
+    uint8_t                 num_drop_lsb;
+    uint8_t                 padding_0;
+
+    uint32_t                last_power;
+
+    uint8_t                 AllowOvrflw;
+    uint8_t                 MCWrWeight;
+    uint8_t                 MCRdWeight;
+    uint8_t                 padding_1[9];
+
+    uint8_t                 enableWinAvg;
+    uint8_t                 numWin_TDP;
+    uint8_t                 l2numWin_TDP;
+    uint8_t                 WinIndex;
+
+    uint32_t                dynPwr_TDP[4];
+    uint32_t                lkgePwr_TDP[4];
+    uint32_t                power_TDP[4];
+    uint32_t                avg_dynPwr_TDP;
+    uint32_t                avg_lkgePwr_TDP;
+    uint32_t                avg_power_TDP;
+    uint32_t                lts_power_TDP;
+    uint8_t                 lts_truncate_n;
+    uint8_t                 padding_2[7];
+};
+
+typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
+
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_NIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+
+struct SMC_NIslands_MCRegisterSet
+{
+    uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct SMC_NIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_NIslands_MCRegisterSet          data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct SMC_NIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  padding[3];
+};
+
+typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_NIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_NIslands_MCArbDramTimingRegisterSet     data[20];
+};
+
+typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
+
+struct SMC_NISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
+#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
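+
+/* Illustrative decode (an assumption: freq[] packs FBDIV/PDIV and ss[] packs
+ * CLKV/CLKS, matching the mask widths above):
+ *
+ *   fbdiv = (freq[i] & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
+ *           SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT;
+ *   clk_s = (ss[i] & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK) >>
+ *           SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT;
+ */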
+
+typedef struct SMC_NISLANDS_SPLL_DIV_TABLE SMC_NISLANDS_SPLL_DIV_TABLE;
+
+#define NISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x100
+
+#define NISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define NISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define NISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0x8
+#define NISLANDS_SMC_FIRMWARE_HEADER_stateTable                0xC
+#define NISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x10
+#define NISLANDS_SMC_FIRMWARE_HEADER_cacTable                  0x14
+#define NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x20
+#define NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x2C
+#define NISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x30
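+
+/* Illustrative reading (an assumption about the layout, not stated in this
+ * header): each define above is the offset of a table pointer within the
+ * firmware header, so the state table pointer would sit at
+ *
+ *   NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ *       NISLANDS_SMC_FIRMWARE_HEADER_stateTable  (0x100 + 0xC = 0x10C)
+ */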
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
new file mode 100644
index 0000000..7e5724a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SMC_H
+#define PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+#define PPSMC_SWSTATE_FLAG_PCIE_X1                      0x08
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO        0x40
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH      0x02
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    0x01
+#define PPSMC_STATEFLAG_POWERBOOST         0x02
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+#define PPSMC_Result_OK             ((uint8_t)0x01)
+#define PPSMC_Result_Failed         ((uint8_t)0xFF)
+
+typedef uint8_t PPSMC_Result;
+
+#define PPSMC_MSG_Halt                      ((uint8_t)0x10)
+#define PPSMC_MSG_Resume                    ((uint8_t)0x11)
+#define PPSMC_MSG_ZeroLevelsDisabled        ((uint8_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         ((uint8_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         ((uint8_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    ((uint8_t)0x16)
+#define PPSMC_MSG_RunningOnAC               ((uint8_t)0x17)
+#define PPSMC_MSG_SwitchToSwState           ((uint8_t)0x20)
+#define PPSMC_MSG_SwitchToInitialState      ((uint8_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             ((uint8_t)0x41)
+#define PPSMC_MSG_ForceHigh                 ((uint8_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         ((uint8_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      ((uint8_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    ((uint8_t)0x52)
+#define PPSMC_MSG_EnableCac                 ((uint8_t)0x53)
+#define PPSMC_MSG_DisableCac                ((uint8_t)0x54)
+#define PPSMC_TDPClampingActive             ((uint8_t)0x59)
+#define PPSMC_TDPClampingInactive           ((uint8_t)0x5A)
+#define PPSMC_StartFanControl               ((uint8_t)0x5B)
+#define PPSMC_StopFanControl                ((uint8_t)0x5C)
+#define PPSMC_MSG_NoDisplay                 ((uint8_t)0x5D)
+#define PPSMC_MSG_HasDisplay                ((uint8_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               ((uint8_t)0x60)
+#define PPSMC_MSG_UVDPowerON                ((uint8_t)0x61)
+#define PPSMC_MSG_EnableULV                 ((uint8_t)0x62)
+#define PPSMC_MSG_DisableULV                ((uint8_t)0x63)
+#define PPSMC_MSG_EnterULV                  ((uint8_t)0x64)
+#define PPSMC_MSG_ExitULV                   ((uint8_t)0x65)
+#define PPSMC_CACLongTermAvgEnable          ((uint8_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         ((uint8_t)0x6F)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   ((uint8_t)0x7A)
+#define PPSMC_FlushDataCache                ((uint8_t)0x80)
+#define PPSMC_MSG_SetEnabledLevels          ((uint8_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           ((uint8_t)0x83)
+#define PPSMC_MSG_ResetToDefaults           ((uint8_t)0x84)
+#define PPSMC_MSG_EnableDTE                 ((uint8_t)0x87)
+#define PPSMC_MSG_DisableDTE                ((uint8_t)0x88)
+#define PPSMC_MSG_ThrottleOVRDSCLKDS        ((uint8_t)0x96)
+#define PPSMC_MSG_CancelThrottleOVRDSCLKDS  ((uint8_t)0x97)
+
+/* CI/KV/KB */
+#define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask       ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask       ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask      ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState          ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel       ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable        ((uint16_t) 0x133)
+#define PPSMC_MSG_Voltage_Cntl_Disable        ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable              ((uint16_t) 0x136)
+#define PPSMC_MSG_PCIeDPM_Disable             ((uint16_t) 0x13d)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF                 ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON                  ((uint16_t) 0x13a)
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask      ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask      ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel          ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel        ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt    ((uint16_t) 0x14a)
+#define PPSMC_MSG_DPM_Enable                  ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable                 ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable              ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable             ((uint16_t) 0x151)
+#define PPSMC_MSG_UVDDPM_Enable               ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable              ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable              ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable             ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable               ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable              ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable               ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable              ((uint16_t) 0x15b)
+#define PPSMC_MSG_VddC_Request                ((uint16_t) 0x15f)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask      ((uint16_t) 0x162)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask      ((uint16_t) 0x167)
+#define PPSMC_MSG_TDCLimitEnable              ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable             ((uint16_t) 0x16a)
+#define PPSMC_MSG_PkgPwrLimitEnable           ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable          ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit              ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp       ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel         ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel       ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel         ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel       ((uint16_t) 0x18C)
+#define PPSMC_MSG_MASTER_DeepSleep_ON         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
+#define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+
+#define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency        ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency        ((uint16_t) 0x201)
+
+/* TN */
+#define PPSMC_MSG_DPM_Config                ((uint32_t) 0x102)
+#define PPSMC_MSG_DPM_ForceState            ((uint32_t) 0x104)
+#define PPSMC_MSG_PG_SIMD_Config            ((uint32_t) 0x108)
+#define PPSMC_MSG_Thermal_Cntl_Enable       ((uint32_t) 0x10a)
+#define PPSMC_MSG_Voltage_Cntl_Enable       ((uint32_t) 0x109)
+#define PPSMC_MSG_VCEPowerOFF               ((uint32_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON                ((uint32_t) 0x10f)
+#define PPSMC_MSG_DPM_N_LevelsDisabled      ((uint32_t) 0x112)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment   ((uint32_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment    ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM                ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM               ((uint32_t) 0x121)
+#define PPSMC_MSG_UVD_DPM_Config            ((uint32_t) 0x124)
+
+
+typedef uint16_t PPSMC_Msg;
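+
+/* Illustrative sketch only; smc_send_msg() is a hypothetical helper name.
+ * A driver posts one of the message IDs above and reads back a PPSMC_Result,
+ * e.g.
+ *
+ *   PPSMC_Result res = smc_send_msg(rdev, PPSMC_MSG_EnableULV);
+ *   if (res != PPSMC_Result_OK)
+ *           DRM_ERROR("SMC message failed\n");
+ */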
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 0000000..4c2eec4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,690 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
+    UCHAR ucI2cLine;        // as interpreted by DAL I2C
+    UCHAR ucI2cAddress;
+    UCHAR ucFanParameters;  // Fan Control Parameters.
+    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
+    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
+    UCHAR ucReserved;       // ----
+    UCHAR ucFlags;          // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64      5
+#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
+#define ATOM_PP_THERMALCONTROLLER_RV770     8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_KONG      10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
+#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS  18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI    19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this when the table format or version changes, so the other fields are not misread.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
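+
+/* Illustrative reading (linear interpolation is an assumption): the table
+ * describes a two-slope fan curve, ramping PWM from usPWMMin at usTMin to
+ * usPWMMed at usTMed, then along a second slope toward usPWMHigh, e.g.:
+ *
+ *   pwm = pwm_med + (t - t_med) * (pwm_high - pwm_med) / (t_high - t_med);
+ */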
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT  usTMax;                          // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+	ATOM_PPLIB_FANTABLE2 basicTable2;
+	UCHAR ucFanControlMode;
+	USHORT usFanPWMMax;
+	USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+    USHORT  usSAMUTableOffset;  //points to ATOM_PPLIB_SAMU_Table
+    USHORT  usPPMTableOffset;   //points to ATOM_PPLIB_PPM_Table
+    USHORT  usACPTableOffset;  //points to ATOM_PPLIB_ACP_Table   
+    USHORT  usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table   
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE   0x00040000           // Whether the driver supports the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY   0x00080000     // Whether the driver supports reverting GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17   0x00100000     // Whether the driver supports thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Whether the VR HOT GPIO is configurable.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Whether the driver supports the temp inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+      ATOM_COMMON_TABLE_HEADER sHeader;
+
+      UCHAR ucDataRevision;
+
+      UCHAR ucNumStates;
+      UCHAR ucStateEntrySize;
+      UCHAR ucClockInfoSize;
+      UCHAR ucNonClockSize;
+
+      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+      USHORT usStateArrayOffset;
+
+      // offset from start of this table to array of ASIC-specific structures,
+      // currently ATOM_PPLIB_CLOCK_INFO.
+      USHORT usClockInfoArrayOffset;
+
+      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+      USHORT usNonClockInfoArrayOffset;
+
+      USHORT usBackbiasTime;    // in microseconds
+      USHORT usVoltageTime;     // in microseconds
+      USHORT usTableSize;       //the size of this structure, or the extended structure
+
+      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
+
+      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
+
+      USHORT usBootClockInfoOffset;
+      USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+    USHORT                     usMvddDependencyOnMCLKOffset;  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
+    USHORT                     usTDPODLimit;
+    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
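+
+/* Illustrative decode (not in the original header); per the comment above,
+ * the field stores lanes - 1:
+ *
+ *   lanes = ((ulCapsAndSettings & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ *            ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ */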
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+// M3 arbitration: 2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
+
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+      USHORT usClassification;
+      UCHAR  ucMinTemperature;
+      UCHAR  ucMaxTemperature;
+      ULONG  ulCapsAndSettings;
+      UCHAR  ucRequiredPower;
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usUnused1;
+      USHORT usUnused2;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
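+
+/* Illustrative decode (not in the original header): clocks are split into a
+ * 16-bit low word plus an 8-bit high byte, so a 24-bit engine clock (in
+ * 10 kHz units, per the SUMO comments below) would be rebuilt as:
+ *
+ *   u32 sclk = info->usEngineClockLow | ((u32)info->ucEngineClockHigh << 16);
+ */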
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
+      UCHAR  ucLowEngineClockHigh;
+      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
+      UCHAR  ucHighEngineClockHigh;
+      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+      UCHAR  ucMemoryClockHigh;           // Currently unused.
+      UCHAR  ucPadding;                   // For proper alignment and size.
+      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could 
+      USHORT usHTLinkFreq;                // One of the ATOM_PPLIB_RS780_HTLINKFREQ_xxx definitions, or a value in MHz (>= 200).
+      ULONG  ulFlags; 
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      UCHAR  ucPCIEGen;
+      UCHAR  ucUnused1;
+
+      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+      
+      UCHAR  ucPCIEGen;
+      USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF; the unit is 10 kHz
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16
+      UCHAR  vddcIndex;         //2-bit vddc index;
+      USHORT tdpLimit;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; the driver uses it to calculate the whole
+      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+      
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * Driver will read the first ucNumDPMLevels in this array
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
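+
+/* Illustrative traversal (not in the original header), using the size rule
+ * from the comment above; states are variable-sized, so the array is walked
+ * by stride rather than by index:
+ *
+ *   size = sizeof(ATOM_PPLIB_STATE_V2) +
+ *          (state->ucNumDPMLevels - 1) * sizeof(UCHAR);
+ *   state = (ATOM_PPLIB_STATE_V2 *)((UCHAR *)state + size);
+ */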
+
+typedef struct _StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+    //how many non-clock levels we have; normally the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+    struct
+    {
+        USHORT usVddc;          // The "fake" standardized VDDC used for power calculations; for CI and newer this is the real VDDC value, read as StdVoltageHiSidd.
+        ULONG  ulLeakageValue;  // For CI and newer this is the "fake" standard VDDC value, read as StdVoltageLoSidd.
+
+    };
+    struct
+     {
+        USHORT usVddc1;
+        USHORT usVddc2;
+        USHORT usVddc3;
+     };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
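+
+/* Note (an interpretation, not stated in this header): which view of the
+ * union applies is platform dependent; with ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE
+ * set, entries would be read as the usVddc1/usVddc2/usVddc3 triple, otherwise
+ * as the usVddc/ulLeakageValue pair.
+ */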
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR  ucVCEClockInfoIndex;
+    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+      UCHAR revid;
+//    VCEClockInfoArray array;
+//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+      UCHAR revid;
+//    UVDClockInfoArray array;
+//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usSAMClockLow;
+      UCHAR  ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usACPClockLow;
+      UCHAR  ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+    USHORT usTDP;
+    USHORT usConfigurableTDP;
+    USHORT usTDC;
+    USHORT usBatteryPowerLimit;
+    USHORT usSmallPowerLimit;
+    USHORT usLowCACLeakage;
+    USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+      USHORT usMaximumPowerDeliveryLimit;
+      USHORT usReserve[7];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A    1
+#define ATOM_PPM_A_I    2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+      UCHAR  ucRevId;
+      UCHAR  ucPpmDesign;          //A+I or A+A
+      USHORT usCpuCoreNumber;
+      ULONG  ulPlatformTDP;
+      ULONG  ulSmallACPlatformTDP;
+      ULONG  ulPlatformTDC;
+      ULONG  ulSmallACPlatformTDC;
+      ULONG  ulApuTDP;
+      ULONG  ulDGpuTDP;  
+      ULONG  ulDGpuUlvPower;
+      ULONG  ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack()
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 0000000..7d39ed6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,4148 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
+#include "atom.h"
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "r100_reg_safe.h"
+#include "rn50_reg_safe.h"
+
+/* Firmware Names */
+#define FIRMWARE_R100		"radeon/R100_cp.bin"
+#define FIRMWARE_R200		"radeon/R200_cp.bin"
+#define FIRMWARE_R300		"radeon/R300_cp.bin"
+#define FIRMWARE_R420		"radeon/R420_cp.bin"
+#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
+#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
+#define FIRMWARE_R520		"radeon/R520_cp.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+
+#include "r100_track.h"
+
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * and others in some cases.
+ */
+
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return !!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR);
+	return !!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR);
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 vline1, vline2;
+
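+	/* Sample the vline counter twice; if the two reads differ, the
+	 * CRTC is actively scanning out.
+	 */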
+	if (crtc == 0) {
+		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	} else {
+		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	}
+	return vline1 != vline2;
+}
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
+void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (crtc == 0) {
+		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+			return;
+	} else {
+		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+			return;
+	}
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
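+ * @async: asynchronous flip (unused on r1xx-r4xx)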
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ */
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	int i;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+}
+
+/**
+ * r100_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Check if the last pageflip is still pending (r1xx-r4xx).
+ * Returns the current update pending status.
+ */
+bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* Return current update_pending status: */
+	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
+		RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
+}
+
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	switch (rdev->pm.dynpm_planned_action) {
+	case DYNPM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state_index = 0;
+		rdev->pm.dynpm_can_downclock = false;
+		break;
+	case DYNPM_ACTION_DOWNCLOCK:
+		if (rdev->pm.current_power_state_index == 0) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_downclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = 0; i < rdev->pm.num_power_states; i++) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i >= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index - 1;
+		}
+		/* if crtcs are active, don't pick a state flagged no-display;
+		 * bump to the next one */
+		if ((rdev->pm.active_crtc_count > 0) &&
+		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+		     RADEON_PM_MODE_NO_DISPLAY)) {
+			rdev->pm.requested_power_state_index++;
+		}
+		break;
+	case DYNPM_ACTION_UPCLOCK:
+		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_upclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i <= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index + 1;
+		}
+		break;
+	case DYNPM_ACTION_DEFAULT:
+		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.dynpm_can_upclock = false;
+		break;
+	case DYNPM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for not defined action\n");
+		return;
+	}
+	/* only one clock mode per power state */
+	rdev->pm.requested_clock_mode_index = 0;
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
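+
+/* Rough example of the selection above: with three power states
+ * (0 = lowest, 2 = highest), one active display and
+ * current_power_state_index == 2, DYNPM_ACTION_DOWNCLOCK simply
+ * requests index 1; with more than one active crtc the loop instead
+ * picks the lowest state not flagged SINGLE_DISPLAY_ONLY, staying put
+ * if that state is not below the current one. */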
+
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
+void r100_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	sclk_cntl = RREG32_PLL(SCLK_CNTL);
+	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+		else
+			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+	} else
+		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+			switch (voltage->delay) {
+			case 33:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+				break;
+			case 66:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+				break;
+			case 99:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+				break;
+			case 132:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+				break;
+			}
+		} else
+			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		sclk_cntl &= ~FORCE_HDP;
+	else
+		sclk_cntl |= FORCE_HDP;
+
+	WREG32_PLL(SCLK_CNTL, sclk_cntl);
+	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
+void r100_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+		return false;
+	else
+		return true;
+}
+
+/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r100_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(RADEON_FP_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP_DETECT_INT_POL;
+		WREG32(RADEON_FP_GEN_CNTL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(RADEON_FP2_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP2_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP2_DETECT_INT_POL;
+		WREG32(RADEON_FP2_GEN_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
+void r100_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* TODO: can we do something here? */
+	/* The hardware seems to cache only one entry, so we should
+	 * discard it; otherwise, if the first GPU GART read hits that
+	 * stale entry, it could end up at the wrong address. */
+}
+
+int r100_pci_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		WARN(1, "R100 PCI GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
+	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	return radeon_gart_table_ram_alloc(rdev);
+}
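+
+/* One 32-bit entry per GPU page, so table_size above is
+ * num_gpu_pages * 4.  As a rough example, assuming the usual 4 KiB GPU
+ * page size, a 64 MiB GTT needs 16384 entries, i.e. a 64 KiB page
+ * table in system RAM. */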
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	/* set address range for PCI address translate */
+	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
+	/* set PCI GART page-table base address */
+	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	r100_pci_gart_tlb_flush(rdev);
+	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	WREG32(RADEON_AIC_LO_ADDR, 0);
+	WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	return addr;
+}
+
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+			    uint64_t entry)
+{
+	u32 *gtt = rdev->gart.ptr;
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
+}
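+
+/* Each GART entry written above is just the low 32 bits of the page's
+ * DMA address, stored little-endian; no per-page flags are encoded,
+ * which is why r100_pci_gart_get_page_entry() returns the address
+ * unchanged. */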
+
+void r100_pci_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r100_pci_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= RADEON_SW_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		tmp |= RADEON_CRTC_VBLANK_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		tmp |= RADEON_CRTC2_VBLANK_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		tmp |= RADEON_FP_DETECT_MASK;
+	}
+	if (rdev->irq.hpd[1]) {
+		tmp |= RADEON_FP2_DETECT_MASK;
+	}
+	WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+	/* read back to post the write */
+	RREG32(RADEON_GEN_INT_CNTL);
+
+	return 0;
+}
+
+void r100_irq_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	tmp = RREG32(R_000044_GEN_INT_STATUS);
+	WREG32(R_000044_GEN_INT_STATUS, tmp);
+}
+
+static uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+	uint32_t irq_mask = RADEON_SW_INT_TEST |
+		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+
+	if (irqs) {
+		WREG32(RADEON_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+	uint32_t status, msi_rearm;
+	bool queue_hotplug = false;
+
+	status = r100_irq_ack(rdev);
+	if (!status) {
+		return IRQ_NONE;
+	}
+	if (rdev->shutdown) {
+		return IRQ_NONE;
+	}
+	while (status) {
+		/* SW interrupt */
+		if (status & RADEON_SW_INT_TEST) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (status & RADEON_CRTC_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_vblank(rdev, 0);
+		}
+		if (status & RADEON_CRTC2_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_vblank(rdev, 1);
+		}
+		if (status & RADEON_FP_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (status & RADEON_FP2_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		status = r100_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (rdev->msi_enabled) {
+		switch (rdev->family) {
+		case CHIP_RS400:
+		case CHIP_RS480:
+			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+			WREG32(RADEON_AIC_CNTL, msi_rearm);
+			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(RADEON_CRTC_CRNT_FRAME);
+	else
+		return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ *
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
+/* Whoever calls radeon_fence_emit should lock the ring and ask
+ * for enough space (today the callers are IB scheduling and buffer moves) */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* We have to make sure that caches are flushed before
+	 * the CPU might read something from VRAM. */
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+	r100_ring_hdp_flush(rdev, ring);
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	/* Unused on older asics, since we don't have semaphores or multiple rings */
+	BUG();
+	return false;
+}
+
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+				    uint64_t src_offset,
+				    uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_fence *fence;
+	uint32_t cur_pages;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
+	uint32_t pitch;
+	uint32_t stride_pixels;
+	unsigned ndw;
+	int num_loops;
+	int r = 0;
+
+	/* radeon limited to 16k stride */
+	stride_bytes &= 0x3fff;
+	/* radeon pitch is /64 */
+	pitch = stride_bytes / 64;
+	stride_pixels = stride_bytes / 4;
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
+
+	/* Ask for enough room for blit + flush + fence */
+	ndw = 64 + (10 * num_loops);
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+		return ERR_PTR(-EINVAL);
+	}
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
+		if (cur_pages > 8191) {
+			cur_pages = 8191;
+		}
+		num_gpu_pages -= cur_pages;
+
+		/* pages run in the Y direction (height);
+		 * page width runs in the X direction (width) */
+		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+		radeon_ring_write(ring,
+				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_SRC_CLIPPING |
+				  RADEON_GMC_DST_CLIPPING |
+				  RADEON_GMC_BRUSH_NONE |
+				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+				  RADEON_GMC_SRC_DATATYPE_COLOR |
+				  RADEON_ROP3_S |
+				  RADEON_DP_SRC_SOURCE_MEMORY |
+				  RADEON_GMC_CLR_CMP_CNTL_DIS |
+				  RADEON_GMC_WR_MSK_DIS);
+		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_HOST_IDLECLEAN |
+			  RADEON_WAIT_DMA_GUI_IDLE);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return ERR_PTR(r);
+	}
+	radeon_ring_unlock_commit(rdev, ring, false);
+	return fence;
+}
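+
+/* A rough sanity check of the blit math above: with the usual 4 KiB
+ * RADEON_GPU_PAGE_SIZE, stride_bytes is 4096, so pitch = 4096 / 64 = 64
+ * and stride_pixels = 4096 / 4 = 1024 ARGB8888 pixels per line.  Each
+ * BITBLT_MULTI packet moves at most 8191 pages, so copying N pages
+ * takes DIV_ROUND_UP(N, 8191) packets, which the
+ * ndw = 64 + 10 * num_loops ring reservation accounts for. */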
+
+static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(R_000E40_RBBM_STATUS);
+		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
+			return 0;
+		}
+		udelay(1);
+	}
+	return -1;
+}
+
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_unlock_commit(rdev, ring, false);
+}
+
+/* Load the microcode for the CP */
+static int r100_cp_init_microcode(struct radeon_device *rdev)
+{
+	const char *fw_name = NULL;
+	int err;
+
+	DRM_DEBUG_KMS("\n");
+
+	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		fw_name = FIRMWARE_R100;
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family == CHIP_RV250) ||
+		   (rdev->family == CHIP_RV280) ||
+		   (rdev->family == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		fw_name = FIRMWARE_R200;
+	} else if ((rdev->family == CHIP_R300) ||
+		   (rdev->family == CHIP_R350) ||
+		   (rdev->family == CHIP_RV350) ||
+		   (rdev->family == CHIP_RV380) ||
+		   (rdev->family == CHIP_RS400) ||
+		   (rdev->family == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		fw_name = FIRMWARE_R300;
+	} else if ((rdev->family == CHIP_R420) ||
+		   (rdev->family == CHIP_R423) ||
+		   (rdev->family == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		fw_name = FIRMWARE_R420;
+	} else if ((rdev->family == CHIP_RS690) ||
+		   (rdev->family == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
+		fw_name = FIRMWARE_RS690;
+	} else if (rdev->family == CHIP_RS600) {
+		DRM_INFO("Loading RS600 Microcode\n");
+		fw_name = FIRMWARE_RS600;
+	} else if ((rdev->family == CHIP_RV515) ||
+		   (rdev->family == CHIP_R520) ||
+		   (rdev->family == CHIP_RV530) ||
+		   (rdev->family == CHIP_R580) ||
+		   (rdev->family == CHIP_RV560) ||
+		   (rdev->family == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		fw_name = FIRMWARE_R520;
+	}
+
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+	if (err) {
+		pr_err("radeon_cp: Failed to load firmware \"%s\"\n", fw_name);
+	} else if (rdev->me_fw->size % 8) {
+		pr_err("radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+	}
+	return err;
+}
+
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+	else
+		rptr = RREG32(RADEON_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	return RREG32(RADEON_CP_RB_WPTR);
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(RADEON_CP_RB_WPTR);
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i, size;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->me_fw) {
+		size = rdev->me_fw->size / 4;
+		fw_data = (const __be32 *)&rdev->me_fw->data[0];
+		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
+		for (i = 0; i < size; i += 2) {
+			WREG32(RADEON_CP_ME_RAM_DATAH,
+			       be32_to_cpup(&fw_data[i]));
+			WREG32(RADEON_CP_ME_RAM_DATAL,
+			       be32_to_cpup(&fw_data[i + 1]));
+		}
+	}
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	unsigned rb_bufsz;
+	unsigned rb_blksz;
+	unsigned max_fetch;
+	unsigned pre_write_timer;
+	unsigned pre_write_limit;
+	unsigned indirect2_start;
+	unsigned indirect1_start;
+	uint32_t tmp;
+	int r;
+
+	if (r100_debugfs_cp_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for CP !\n");
+	}
+	if (!rdev->me_fw) {
+		r = r100_cp_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Align ring size */
+	rb_bufsz = order_base_2(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
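+	/* e.g. a requested ring_size of 1000 KiB: order_base_2 rounds up,
+	 * so rb_bufsz = order_base_2(1024000 / 8) = 17 and ring_size
+	 * becomes (1 << 18) * 4 bytes = 1 MiB. */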
+	r100_cp_load_microcode(rdev);
+	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r) {
+		return r;
+	}
+	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
+	 * the rptr copy in system ram */
+	rb_blksz = 9;
+	/* cp will read 128 bytes at a time (4 dwords) */
+	max_fetch = 1;
+	ring->align_mask = 16 - 1;
+	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+	pre_write_timer = 64;
+	/* Force CP_RB_WPTR write if written more than one time before the
+	 * delay expire
+	 */
+	pre_write_limit = 0;
+	/* Setup the cp cache like this (cache size is 96 dwords) :
+	 *	RING		0  to 15
+	 *	INDIRECT1	16 to 79
+	 *	INDIRECT2	80 to 95
+	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
+	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
+	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
+	 * Idea being that most of the gpu cmd will be through indirect1 buffer
+	 * so it gets the bigger cache.
+	 */
+	indirect2_start = 80;
+	indirect1_start = 16;
+	/* cp setup */
+	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+	       REG_SET(RADEON_MAX_FETCH, max_fetch));
+#ifdef __BIG_ENDIAN
+	tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
+
+	/* Set ring address */
+	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+	/* Force read & write ptr to 0 */
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(R_00070C_CP_RB_RPTR_ADDR,
+		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+	if (rdev->wb.enabled)
+		WREG32(R_000770_SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RADEON_RB_NO_UPDATE;
+		WREG32(R_000770_SCRATCH_UMSK, 0);
+	}
+
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	udelay(10);
+	/* Set cp mode to bus mastering & enable cp */
+	WREG32(RADEON_CP_CSQ_MODE,
+	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
+	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+	/* at this point everything should be setup correctly to enable master */
+	pci_set_master(rdev->pdev);
+
+	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+		return r;
+	}
+	ring->ready = true;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	if (!ring->rptr_save_reg /* not resuming from suspend */
+	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+	return 0;
+}
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+	if (r100_cp_wait_for_idle(rdev)) {
+		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
+	}
+	/* Disable ring */
+	r100_cp_disable(rdev);
+	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+	/* Disable ring */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(RADEON_CP_CSQ_MODE, 0);
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	WREG32(R_000770_SCRATCH_UMSK, 0);
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+}
+
+/*
+ * CS functions
+ */
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg)
+{
+	int r;
+	u32 tile_flags = 0;
+	u32 tmp;
+	struct radeon_bo_list *reloc;
+	u32 value;
+
+	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+	if (r) {
+		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+			  idx, reg);
+		radeon_cs_dump_packet(p, pkt);
+		return r;
+	}
+
+	value = radeon_get_ib_value(p, idx);
+	tmp = value & 0x003fffff;
+	tmp += (((u32)reloc->gpu_offset) >> 10);
+
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (reloc->tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= RADEON_DST_TILE_MACRO;
+		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
+			if (reg == RADEON_SRC_PITCH_OFFSET) {
+				DRM_ERROR("Cannot src blit from microtiled surface\n");
+				radeon_cs_dump_packet(p, pkt);
+				return -EINVAL;
+			}
+			tile_flags |= RADEON_DST_TILE_MICRO;
+		}
+
+		tmp |= tile_flags;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+	} else
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+	return 0;
+}
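+
+/* Rough illustration of the packing handled above: the low 22 bits of a
+ * pitch/offset register hold the surface offset in 1 KiB units (hence
+ * gpu_offset >> 10), while the upper bits carry the pitch and, when
+ * RADEON_CS_KEEP_TILING_FLAGS is not set, the macro/micro tile flags.
+ * An object at GPU offset 0x100000, for instance, contributes 0x400 to
+ * the offset field. */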
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx)
+{
+	unsigned c, i;
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	int r = 0;
+	volatile uint32_t *ib;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	c = radeon_get_ib_value(p, idx++) & 0x1F;
+	if (c > 16) {
+	    DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+		      pkt->opcode);
+	    radeon_cs_dump_packet(p, pkt);
+	    return -EINVAL;
+	}
+	track->num_arrays = c;
+	for (i = 0; i < (c - 1); i+=2, idx+=3) {
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize &= 0x7F;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
+		track->arrays[i + 1].robj = reloc->robj;
+		track->arrays[i + 1].esize = idx_value >> 24;
+		track->arrays[i + 1].esize &= 0x7F;
+	}
+	if (c & 1) {
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+					  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].esize &= 0x7F;
+	}
+	return r;
+}
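+
+/* Packet layout assumed by the parser above: the first dword carries
+ * the array count in its low 5 bits; arrays then come in pairs sharing
+ * one descriptor dword (esize of array i in bits 8..14, of array i+1 in
+ * bits 24..30) followed by one offset dword per array, and a trailing
+ * odd array uses its own descriptor + offset pair. */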
+
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check)
+{
+	unsigned reg;
+	unsigned i, j, m;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	/* Check that the register falls into the register range
+	 * determined by the number of entries (n) in the
+	 * safe register bitmap.
+	 */
+	if (pkt->one_reg_wr) {
+		if ((reg >> 7) > n) {
+			return -EINVAL;
+		}
+	} else {
+		if (((reg + (pkt->count << 2)) >> 7) > n) {
+			return -EINVAL;
+		}
+	}
+	for (i = 0; i <= pkt->count; i++, idx++) {
+		j = (reg >> 7);
+		m = 1 << ((reg >> 2) & 31);
+		if (auth[j] & m) {
+			r = check(p, pkt, idx, reg);
+			if (r) {
+				return r;
+			}
+		}
+		if (pkt->one_reg_wr) {
+			if (!(auth[j] & m)) {
+				break;
+			}
+		} else {
+			reg += 4;
+		}
+	}
+	return 0;
+}
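+
+/* The safe-register bitmap used above has one bit per register dword:
+ * word j = reg >> 7 (each 32-bit word covers 32 registers * 4 bytes =
+ * 128 bytes of register space) and bit m = (reg >> 2) & 31.  For
+ * example, a register at offset 0x15c0 lands in word 0x2b, bit 16. */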
+
+/**
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits:
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL + value
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched-off crtc and nulls out the
+ * wait in that case.
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, waitreloc;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the wait until */
+	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a wait-until and only 1 count */
+	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+	    waitreloc.count != 0) {
+		DRM_ERROR("vline wait had illegal wait until segment\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
+		DRM_ERROR("vline wait had illegal wait until\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += waitreloc.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 5);
+	reg = R100_CP_PACKET0_GET_REG(header);
+	crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+	if (!crtc) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -ENOENT;
+	}
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the wait until */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			return -EINVAL;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+	}
+
+	return 0;
+}
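+
+/* Resulting IB layout assumed above, with h_idx pointing at the first
+ * header: h_idx + 0/1 hold the VLINE_START_END header and value,
+ * h_idx + 2/3 the WAIT_UNTIL header and value, and h_idx + 5 the
+ * crtc_id carried in the following relocation NOP; a disabled crtc is
+ * therefore handled by overwriting dwords h_idx + 2 and h_idx + 3 with
+ * PACKET2 NOPs. */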
+
+static int r100_get_vtx_size(uint32_t vtx_fmt)
+{
+	int vtx_size;
+	vtx_size = 2;
+	/* ordered according to bits in spec */
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt & (0x7 << 15))
+		vtx_size += (vtx_fmt >> 15) & 0x7;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
+		vtx_size++;
+	return vtx_size;
+}
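+
+/* Small worked example for the decoder above: a format with only
+ * RADEON_SE_VTX_FMT_Z and RADEON_SE_VTX_FMT_FPCOLOR set yields
+ * 2 (implicit base) + 1 (Z) + 3 (FP color) = 6 dwords per vertex. */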
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt,
+			      unsigned idx, unsigned reg)
+{
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i, face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for out of
+		 * range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case RADEON_PP_TXOFFSET_0:
+	case RADEON_PP_TXOFFSET_1:
+	case RADEON_PP_TXOFFSET_2:
+		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_TXO_MACRO_TILE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T0_0:
+	case RADEON_PP_CUBIC_OFFSET_T0_1:
+	case RADEON_PP_CUBIC_OFFSET_T0_2:
+	case RADEON_PP_CUBIC_OFFSET_T0_3:
+	case RADEON_PP_CUBIC_OFFSET_T0_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[0].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T1_0:
+	case RADEON_PP_CUBIC_OFFSET_T1_1:
+	case RADEON_PP_CUBIC_OFFSET_T1_2:
+	case RADEON_PP_CUBIC_OFFSET_T1_3:
+	case RADEON_PP_CUBIC_OFFSET_T1_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[1].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T2_0:
+	case RADEON_PP_CUBIC_OFFSET_T2_1:
+	case RADEON_PP_CUBIC_OFFSET_T2_2:
+	case RADEON_PP_CUBIC_OFFSET_T2_3:
+	case RADEON_PP_CUBIC_OFFSET_T2_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[2].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case RADEON_SE_VTX_FMT:
+		track->vtx_size = r100_get_vtx_size(idx_value);
+		break;
+	case RADEON_PP_TEX_SIZE_0:
+	case RADEON_PP_TEX_SIZE_1:
+	case RADEON_PP_TEX_SIZE_2:
+		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TEX_PITCH_0:
+	case RADEON_PP_TEX_PITCH_1:
+	case RADEON_PP_TEX_PITCH_2:
+		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFILTER_0:
+	case RADEON_PP_TXFILTER_1:
+	case RADEON_PP_TXFILTER_2:
+		i = (reg - RADEON_PP_TXFILTER_0) / 24;
+		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
+						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFORMAT_0:
+	case RADEON_PP_TXFORMAT_1:
+	case RADEON_PP_TXFORMAT_2:
+		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
+		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+			track->textures[i].tex_coord_type = 2;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case RADEON_TXFORMAT_I8:
+		case RADEON_TXFORMAT_RGB332:
+		case RADEON_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_AI88:
+		case RADEON_TXFORMAT_ARGB1555:
+		case RADEON_TXFORMAT_RGB565:
+		case RADEON_TXFORMAT_ARGB4444:
+		case RADEON_TXFORMAT_VYUY422:
+		case RADEON_TXFORMAT_YVYU422:
+		case RADEON_TXFORMAT_SHADOW16:
+		case RADEON_TXFORMAT_LDUDV655:
+		case RADEON_TXFORMAT_DUDV88:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_ARGB8888:
+		case RADEON_TXFORMAT_RGBA8888:
+		case RADEON_TXFORMAT_SHADOW32:
+		case RADEON_TXFORMAT_LDUDUV8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case RADEON_TXFORMAT_DXT23:
+		case RADEON_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_FACES_0:
+	case RADEON_PP_CUBIC_FACES_1:
+	case RADEON_PP_CUBIC_FACES_2:
+		tmp = idx_value;
+		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj)
+{
+	unsigned idx;
+	u32 value;
+	idx = pkt->idx + 1;
+	value = radeon_get_ib_value(p, idx + 2);
+	if ((value + 1) > radeon_bo_size(robj)) {
+		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
+			  "(need %u have %lu) !\n",
+			  value + 1,
+			  radeon_bo_size(robj));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	unsigned idx;
+	volatile uint32_t *ib;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch (pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	case 0x23:
+		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
+		track->num_arrays = 1;
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
+
+		track->arrays[0].robj = reloc->robj;
+		track->arrays[0].esize = track->vtx_size;
+
+		track->max_indx = radeon_get_ib_value(p, idx+1);
+
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+	case PACKET3_3D_DRAW_IMMD:
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_IMMD_2:
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (!track)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			if (p->rdev->family >= CHIP_R200)
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r200_packet0_check);
+			else
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r100_packet0_check);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r100_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n",
+				  pkt.type);
+			return -EINVAL;
+		}
+		if (r)
+			return r;
+	} while (p->idx < p->chunk_ib->length_dw);
+	return 0;
+}
+
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+	DRM_ERROR("pitch                      %d\n", t->pitch);
+	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
+	DRM_ERROR("width                      %d\n", t->width);
+	DRM_ERROR("width_11                   %d\n", t->width_11);
+	DRM_ERROR("height                     %d\n", t->height);
+	DRM_ERROR("height_11                  %d\n", t->height_11);
+	DRM_ERROR("num levels                 %d\n", t->num_levels);
+	DRM_ERROR("depth                      %d\n", t->txdepth);
+	DRM_ERROR("bpp                        %d\n", t->cpp);
+	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
+	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
+	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+	DRM_ERROR("compress format            %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+	int block_width, block_height, block_bytes;
+	int wblocks, hblocks;
+	int min_wblocks;
+	int sz;
+
+	block_width = 4;
+	block_height = 4;
+
+	switch (compress_format) {
+	case R100_TRACK_COMP_DXT1:
+		block_bytes = 8;
+		min_wblocks = 4;
+		break;
+	default:
+	case R100_TRACK_COMP_DXT35:
+		block_bytes = 16;
+		min_wblocks = 2;
+		break;
+	}
+
+	hblocks = (h + block_height - 1) / block_height;
+	wblocks = (w + block_width - 1) / block_width;
+	if (wblocks < min_wblocks)
+		wblocks = min_wblocks;
+	sz = wblocks * hblocks * block_bytes;
+	return sz;
+}
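+
+/* e.g. a 16x16 DXT1 texture: wblocks = hblocks = 4 and 8 bytes per
+ * block, so 4 * 4 * 8 = 128 bytes; a 4x4 DXT1 texture is still charged
+ * min_wblocks = 4 width blocks: 4 * 1 * 8 = 32 bytes. */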
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+			      struct r100_cs_track *track, unsigned idx)
+{
+	unsigned face, w, h;
+	struct radeon_bo *cube_robj;
+	unsigned long size;
+	unsigned compress_format = track->textures[idx].compress_format;
+
+	for (face = 0; face < 5; face++) {
+		cube_robj = track->textures[idx].cube_info[face].robj;
+		w = track->textures[idx].cube_info[face].width;
+		h = track->textures[idx].cube_info[face].height;
+
+		if (compress_format) {
+			size = r100_track_compress_size(compress_format, w, h);
+		} else
+			size = w * h;
+		size *= track->textures[idx].cpp;
+
+		size += track->textures[idx].cube_info[face].offset;
+
+		if (size > radeon_bo_size(cube_robj)) {
+			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+				  size, radeon_bo_size(cube_robj));
+			r100_cs_track_texture_print(&track->textures[idx]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+				       struct r100_cs_track *track)
+{
+	struct radeon_bo *robj;
+	unsigned long size;
+	unsigned u, i, w, h, d;
+	int ret;
+
+	for (u = 0; u < track->num_texture; u++) {
+		if (!track->textures[u].enabled)
+			continue;
+		if (track->textures[u].lookup_disable)
+			continue;
+		robj = track->textures[u].robj;
+		if (robj == NULL) {
+			DRM_ERROR("No texture bound to unit %u\n", u);
+			return -EINVAL;
+		}
+		size = 0;
+		for (i = 0; i <= track->textures[u].num_levels; i++) {
+			if (track->textures[u].use_pitch) {
+				if (rdev->family < CHIP_R300)
+					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+				else
+					w = track->textures[u].pitch / (1 << i);
+			} else {
+				w = track->textures[u].width;
+				if (rdev->family >= CHIP_RV515)
+					w |= track->textures[u].width_11;
+				w = w / (1 << i);
+				if (track->textures[u].roundup_w)
+					w = roundup_pow_of_two(w);
+			}
+			h = track->textures[u].height;
+			if (rdev->family >= CHIP_RV515)
+				h |= track->textures[u].height_11;
+			h = h / (1 << i);
+			if (track->textures[u].roundup_h)
+				h = roundup_pow_of_two(h);
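+			/*
+			 * Coordinate type 1 is a volume texture: depth
+			 * also halves per mip level, clamped to 1.
+			 */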
+			if (track->textures[u].tex_coord_type == 1) {
+				d = (1 << track->textures[u].txdepth) / (1 << i);
+				if (!d)
+					d = 1;
+			} else {
+				d = 1;
+			}
+			if (track->textures[u].compress_format) {
+				/* compressed textures are block based */
+				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+			} else {
+				size += w * h * d;
+			}
+		}
+		size *= track->textures[u].cpp;
+
+		switch (track->textures[u].tex_coord_type) {
+		case 0:
+		case 1:
+			break;
+		case 2:
+			if (track->separate_cube) {
+				ret = r100_cs_track_cube(rdev, track, u);
+				if (ret)
+					return ret;
+			} else {
+				size *= 6;
+			}
+			break;
+		default:
+			DRM_ERROR("Invalid texture coordinate type %u for unit "
+				  "%u\n", track->textures[u].tex_coord_type, u);
+			return -EINVAL;
+		}
+		if (size > radeon_bo_size(robj)) {
+			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+				  "%lu\n", u, size, radeon_bo_size(robj));
+			r100_cs_track_texture_print(&track->textures[u]);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i;
+	unsigned long size;
+	unsigned prim_walk;
+	unsigned nverts;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+	    !track->blend_read_enable)
+		num_cb = 0;
+
+	for (i = 0; i < num_cb; i++) {
+		if (track->cb[i].robj == NULL) {
+			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+			return -EINVAL;
+		}
+		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+		size += track->cb[i].offset;
+		if (size > radeon_bo_size(track->cb[i].robj)) {
+			DRM_ERROR("[drm] Buffer too small for color buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->cb[i].robj));
+			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+				  i, track->cb[i].pitch, track->cb[i].cpp,
+				  track->cb[i].offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
+		if (track->zb.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for z buffer !\n");
+			return -EINVAL;
+		}
+		size = track->zb.pitch * track->zb.cpp * track->maxy;
+		size += track->zb.offset;
+		if (size > radeon_bo_size(track->zb.robj)) {
+			DRM_ERROR("[drm] Buffer too small for z buffer "
+				  "(need %lu have %lu) !\n", size,
+				  radeon_bo_size(track->zb.robj));
+			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+				  track->zb.pitch, track->zb.cpp,
+				  track->zb.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
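+	/*
+	 * VAP_VF_CNTL layout as used here: bits [5:4] hold PRIM_WALK,
+	 * bit 14 selects the alternate vertex count, otherwise
+	 * NUM_VERTICES sits in bits [31:16].
+	 */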
+	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+	if (track->vap_vf_cntl & (1 << 14)) {
+		nverts = track->vap_alt_nverts;
+	} else {
+		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+	}
+	switch (prim_walk) {
+	case 1:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * track->max_indx * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				DRM_ERROR("Max indices %u\n", track->max_indx);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 2:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * (nverts - 1) * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 3:
+		size = track->vtx_size * nverts;
+		if (size != track->immd_dwords) {
+			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
+				  track->immd_dwords, size);
+			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+				  nverts, track->vtx_size);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+			  prim_walk);
+		return -EINVAL;
+	}
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
+}
+
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i, face;
+
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
+	if (rdev->family < CHIP_R300) {
+		track->num_cb = 1;
+		if (rdev->family <= CHIP_RS200)
+			track->num_texture = 3;
+		else
+			track->num_texture = 6;
+		track->maxy = 2048;
+		track->separate_cube = 1;
+	} else {
+		track->num_cb = 4;
+		track->num_texture = 16;
+		track->maxy = 4096;
+		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
+	}
+
+	for (i = 0; i < track->num_cb; i++) {
+		track->cb[i].robj = NULL;
+		track->cb[i].pitch = 8192;
+		track->cb[i].cpp = 16;
+		track->cb[i].offset = 0;
+	}
+	track->z_enabled = true;
+	track->zb.robj = NULL;
+	track->zb.pitch = 8192;
+	track->zb.cpp = 4;
+	track->zb.offset = 0;
+	track->vtx_size = 0x7F;
+	track->immd_dwords = 0xFFFFFFFFUL;
+	track->num_arrays = 11;
+	track->max_indx = 0x00FFFFFFUL;
+	for (i = 0; i < track->num_arrays; i++) {
+		track->arrays[i].robj = NULL;
+		track->arrays[i].esize = 0x7F;
+	}
+	for (i = 0; i < track->num_texture; i++) {
+		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+		track->textures[i].pitch = 16536;
+		track->textures[i].width = 16536;
+		track->textures[i].height = 16536;
+		track->textures[i].width_11 = 1 << 11;
+		track->textures[i].height_11 = 1 << 11;
+		track->textures[i].num_levels = 12;
+		if (rdev->family <= CHIP_RS200) {
+			track->textures[i].tex_coord_type = 0;
+			track->textures[i].txdepth = 0;
+		} else {
+			track->textures[i].txdepth = 16;
+			track->textures[i].tex_coord_type = 1;
+		}
+		track->textures[i].cpp = 64;
+		track->textures[i].robj = NULL;
+		/* CS IB emission code makes sure texture units are disabled */
+		track->textures[i].enabled = false;
+		track->textures[i].lookup_disable = false;
+		track->textures[i].roundup_w = true;
+		track->textures[i].roundup_h = true;
+		if (track->separate_cube)
+			for (face = 0; face < 5; face++) {
+				track->textures[i].cube_info[face].robj = NULL;
+				track->textures[i].cube_info[face].width = 16536;
+				track->textures[i].cube_info[face].height = 16536;
+				track->textures[i].cube_info[face].offset = 0;
+			}
+	}
+}
+
+/*
+ * Global GPU functions
+ */
+static void r100_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+	}
+
+	if (rdev->family == CHIP_RV100 ||
+	    rdev->family == CHIP_RS100 ||
+	    rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+	}
+}
+
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+		if (tmp >= n) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+		pr_warn("radeon: wait for empty RBBM fifo failed! Bad things might happen.\n");
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS);
+		if (!(tmp & RADEON_RBBM_ACTIVE)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 rbbm_status;
+
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	/* Enable bus mastering */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+}
+
+void r100_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	tmp = RREG32(R_000030_BUS_CNTL);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	mdelay(1);
+	pci_clear_master(rdev->pdev);
+	mdelay(1);
+}
+
+int r100_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+					S_0000F0_SOFT_RESET_RE(1) |
+					S_0000F0_SOFT_RESET_PP(1) |
+					S_0000F0_SOFT_RESET_RB(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else {
+		dev_info(rdev->dev, "GPU reset succeeded\n");
+	}
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
+
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	bool force_dac2 = false;
+	u32 tmp;
+
+	/* set these so they don't interfere with anything */
+	WREG32(RADEON_OV0_SCALE_CNTL, 0);
+	WREG32(RADEON_SUBPIC_CNTL, 0);
+	WREG32(RADEON_VIPH_CONTROL, 0);
+	WREG32(RADEON_I2C_CNTL_1, 0);
+	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+	/* always set up dac2 on rn50 and some rv100 as lots
+	 * of servers seem to wire it up to a VGA port but
+	 * don't report it in the bios connector
+	 * table.
+	 */
+	switch (dev->pdev->device) {
+		/* RN50 */
+	case 0x515e:
+	case 0x5969:
+		force_dac2 = true;
+		break;
+		/* RV100*/
+	case 0x5159:
+	case 0x515a:
+		/* DELL triple head servers */
+		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+		    ((dev->pdev->subsystem_device == 0x016c) ||
+		     (dev->pdev->subsystem_device == 0x016d) ||
+		     (dev->pdev->subsystem_device == 0x016e) ||
+		     (dev->pdev->subsystem_device == 0x016f) ||
+		     (dev->pdev->subsystem_device == 0x0170) ||
+		     (dev->pdev->subsystem_device == 0x017d) ||
+		     (dev->pdev->subsystem_device == 0x017e) ||
+		     (dev->pdev->subsystem_device == 0x0183) ||
+		     (dev->pdev->subsystem_device == 0x018a) ||
+		     (dev->pdev->subsystem_device == 0x019a)))
+			force_dac2 = true;
+		break;
+	}
+
+	if (force_dac2) {
+		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+		/* For CRT on DAC2, don't turn it on if the BIOS didn't
+		 * enable it, even if it's detected.
+		 */
+
+		/* force it to crtc0 */
+		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+		/* set up the TV DAC */
+		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+				 RADEON_TV_DAC_STD_MASK |
+				 RADEON_TV_DAC_RDACPD |
+				 RADEON_TV_DAC_GDACPD |
+				 RADEON_TV_DAC_BDACPD |
+				 RADEON_TV_DAC_BGADJ_MASK |
+				 RADEON_TV_DAC_DACADJ_MASK);
+		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+				RADEON_TV_DAC_NHOLD |
+				RADEON_TV_DAC_STD_PS2 |
+				(0x58 << 16));
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	/* switch PM block to ACPI mode */
+	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+	tmp &= ~RADEON_PM_MODE_SEL;
+	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+}
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_is_ddr = false;
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->mc.vram_is_ddr = true;
+	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+		rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RV100_HALF_MODE) {
+			rdev->mc.vram_width = 32;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			rdev->mc.vram_width /= 4;
+			rdev->mc.vram_is_ddr = true;
+		}
+	} else if (rdev->family <= CHIP_RV280) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+			rdev->mc.vram_width = 128;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+	} else {
+		/* newer IGPs */
+		rdev->mc.vram_width = 128;
+	}
+}
+
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
+{
+	u32 aper_size;
+	u8 byte;
+
+	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
+	 * that is, cards with the 2nd generation multifunction PCI interface.
+	 */
+	if (rdev->family == CHIP_RV280 ||
+	    rdev->family >= CHIP_RV350) {
+		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+		       ~RADEON_HDP_APER_CNTL);
+		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+		return aper_size * 2;
+	}
+
+	/* Older cards have all sorts of funny issues to deal with. First
+	 * check if it's a multifunction card by reading the PCI config
+	 * header type... Limit those to one aperture size
+	 */
+	pci_read_config_byte(rdev->pdev, 0xe, &byte);
+	if (byte & 0x80) {
+		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+		DRM_INFO("Limiting VRAM to one aperture\n");
+		return aper_size;
+	}
+
+	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
+	 * has set it up. We don't write it as that's broken on some ASICs, but
+	 * we expect the BIOS to have done the right thing (might be too optimistic...)
+	 */
+	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+		return aper_size * 2;
+	return aper_size;
+}
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+	u64 config_aper_size;
+
+	/* work out accessible VRAM */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+	/* FIXME we don't use the second aperture yet when we could use it */
+	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+		rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+	if (rdev->flags & RADEON_IS_IGP) {
+		uint32_t tom;
+		/* read NB_TOM to get the amount of ram stolen for the GPU */
+		tom = RREG32(RADEON_NB_TOM);
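+		/*
+		 * NB_TOM holds the stolen range in 64KB units:
+		 * top in bits [31:16], bottom in bits [15:0].
+		 */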
+		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	} else {
+		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+		/* Some production M6 boards report 0
+		 * when they actually have 8 MB
+		 */
+		if (rdev->mc.real_vram_size == 0) {
+			rdev->mc.real_vram_size = 8192 * 1024;
+			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		}
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+		 * Novell bug 204882, along with lots of Ubuntu ones
+		 */
+		if (rdev->mc.aper_size > config_aper_size)
+			config_aper_size = rdev->mc.aper_size;
+
+		if (config_aper_size > rdev->mc.real_vram_size)
+			rdev->mc.mc_vram_size = config_aper_size;
+		else
+			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
+}
+
+void r100_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(RADEON_CONFIG_CNTL);
+	if (!state) {
+		temp &= ~RADEON_CFG_VGA_RAM_EN;
+		temp |= RADEON_CFG_VGA_IO_DIS;
+	} else {
+		temp &= ~RADEON_CFG_VGA_IO_DIS;
+	}
+	WREG32(RADEON_CONFIG_CNTL, temp);
+}
+
+static void r100_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	r100_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
+		(void)RREG32(RADEON_CRTC_GEN_CNTL);
+	}
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+	/* This workaround is necessary on RV100, RS100 and RS200 chips,
+	 * or the chip could hang on a subsequent access.
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+		mdelay(5);
+	}
+
+	/* This function is required to work around a hardware bug in some (all?)
+	 * revisions of the R300.  This workaround should be called after every
+	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
+	 * may not be correct.
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+		uint32_t save, tmp;
+
+		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+	}
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t data;
+
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
+	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+	r100_pll_errata_after_index(rdev);
+	data = RREG32(RADEON_CLOCK_CNTL_DATA);
+	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
+	return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
+	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+	r100_pll_errata_after_index(rdev);
+	WREG32(RADEON_CLOCK_CNTL_DATA, v);
+	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
+}
+
+static void r100_set_safe_registers(struct radeon_device *rdev)
+{
+	if (ASIC_IS_RN50(rdev)) {
+		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
+	} else if (rdev->family < CHIP_R200) {
+		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
+	} else {
+		r200_set_safe_registers(rdev);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t reg, value;
+	unsigned i;
+
+	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	for (i = 0; i < 64; i++) {
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t rdp, wdp;
+	unsigned count, i, j;
+
+	radeon_ring_free_size(rdev, ring);
+	rdp = RREG32(RADEON_CP_RB_RPTR);
+	wdp = RREG32(RADEON_CP_RB_WPTR);
+	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+	if (ring->ready) {
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t csq_stat, csq2_stat, tmp;
+	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+	unsigned i;
+
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+	r_rptr = (csq_stat >> 0) & 0x3ff;
+	r_wptr = (csq_stat >> 10) & 0x3ff;
+	ib1_rptr = (csq_stat >> 20) & 0x3ff;
+	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+	seq_printf(m, "Ring rptr %u\n", r_rptr);
+	seq_printf(m, "Ring wptr %u\n", r_wptr);
+	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
+	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
+	seq_printf(m, "Ring fifo:\n");
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect1 fifo:\n");
+	for (i = 256; i <= 512; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect2 fifo:\n");
+	for (i = 640; i < ib1_wptr; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_FB_LOCATION);
+	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_AGP_LOCATION);
+	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AGP_BASE);
+	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(0x01D0);
+	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_LO_ADDR);
+	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_HI_ADDR);
+	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(0x01E4);
+	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
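+	/*
+	 * The per-surface INFO/LOWER_BOUND/UPPER_BOUND registers repeat
+	 * every 16 bytes.
+	 */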
+	int surf_index = reg * 16;
+	int flags = 0;
+
+	if (rdev->family <= CHIP_RS200) {
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+			flags |= RADEON_SURF_TILE_COLOR_BOTH;
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= RADEON_SURF_TILE_COLOR_MACRO;
+		/* setting pitch to 0 disables tiling */
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				== 0)
+			pitch = 0;
+	} else if (rdev->family <= CHIP_RV280) {
+		if (tiling_flags & (RADEON_TILING_MACRO))
+			flags |= R200_SURF_TILE_COLOR_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R200_SURF_TILE_COLOR_MICRO;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= R300_SURF_TILE_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R300_SURF_TILE_MICRO;
+	}
+
+	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
+		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
+	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
+		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
+
+	/* r100/r200 pitch is in units of 16 bytes, r300+ in units of 8 */
+	if (rdev->family < CHIP_R300)
+		flags |= pitch / 16;
+	else
+		flags |= pitch / 8;
+
+	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
+	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
+	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
+	return 0;
+}
+
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	int surf_index = reg * 16;
+	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
+}
+
+void r100_bandwidth_update(struct radeon_device *rdev)
+{
+	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
+	fixed20_12 crit_point_ff = {0};
+	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+	fixed20_12 memtcas_ff[8] = {
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init(0),
+	};
+	fixed20_12 memtcas_rs480_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init_half(3),
+	};
+	fixed20_12 memtcas2_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+	};
+	fixed20_12 memtrbs[8] = {
+		dfixed_init(1),
+		dfixed_init_half(1),
+		dfixed_init(2),
+		dfixed_init_half(2),
+		dfixed_init(3),
+		dfixed_init_half(3),
+		dfixed_init(4),
+		dfixed_init_half(4)
+	};
+	fixed20_12 memtrbs_r4xx[8] = {
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+		dfixed_init(8),
+		dfixed_init(9),
+		dfixed_init(10),
+		dfixed_init(11)
+	};
+	fixed20_12 min_mem_eff;
+	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+	fixed20_12 cur_latency_mclk, cur_latency_sclk;
+	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0},
+		disp_drain_rate2, read_return_rate;
+	fixed20_12 time_disp1_drop_priority;
+	int c;
+	int cur_size = 16;       /* in octawords */
+	int critical_point = 0, critical_point2;
+/* 	uint32_t read_return_rate, time_disp1_drop_priority; */
+	int stop_req, max_stop_req;
+	struct drm_display_mode *mode1 = NULL;
+	struct drm_display_mode *mode2 = NULL;
+	uint32_t pixel_bytes1 = 0;
+	uint32_t pixel_bytes2 = 0;
+
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled) {
+		const struct drm_framebuffer *fb =
+			rdev->mode_info.crtcs[0]->base.primary->fb;
+
+		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
+		pixel_bytes1 = fb->format->cpp[0];
+	}
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		if (rdev->mode_info.crtcs[1]->base.enabled) {
+			const struct drm_framebuffer *fb =
+				rdev->mode_info.crtcs[1]->base.primary->fb;
+
+			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+			pixel_bytes2 = fb->format->cpp[0];
+		}
+	}
+
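+	/*
+	 * Assume 80% memory efficiency: dfixed_const_8(0) encodes 0.8
+	 * in 20.12 fixed point.
+	 */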
+	min_mem_eff.full = dfixed_const_8(0);
+	/* bump MC display init latency for enabled CRTCs when priority is high */
+	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		/* check crtc enables */
+		if (mode2)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode1)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+	}
+
+	/*
+	 * determine if there is enough bandwidth for the current mode
+	 */
+	sclk_ff = rdev->pm.sclk;
+	mclk_ff = rdev->pm.mclk;
+
+	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+	temp_ff.full = dfixed_const(temp);
+	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
+
+	pix_clk.full = 0;
+	pix_clk2.full = 0;
+	peak_disp_bw.full = 0;
+	if (mode1) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+		pix_clk.full = dfixed_div(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes1);
+		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
+	}
+	if (mode2) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes2);
+		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
+	}
+
+	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
+	if (peak_disp_bw.full >= mem_bw.full) {
+		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
+			  "If you have flickering problems, try lowering the resolution, refresh rate, or color depth\n");
+	}
+
+	/* Get Trcd/Trp/Tras from the MEM_TIMING_CNTL register. */
+	temp = RREG32(RADEON_MEM_TIMING_CNTL);
+	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+		mem_trcd = ((temp >> 2) & 0x3) + 1;
+		mem_trp  = ((temp & 0x3)) + 1;
+		mem_tras = ((temp & 0x70) >> 4) + 1;
+	} else if (rdev->family == CHIP_R300 ||
+		   rdev->family == CHIP_R350) { /* r300, r350 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 11) & 0xf) + 4;
+	} else if (rdev->family == CHIP_RV350 ||
+		   rdev->family == CHIP_RV380) {
+		/* rv3x0 */
+		mem_trcd = (temp & 0x7) + 3;
+		mem_trp = ((temp >> 8) & 0x7) + 3;
+		mem_tras = ((temp >> 11) & 0xf) + 6;
+	} else if (rdev->family == CHIP_R420 ||
+		   rdev->family == CHIP_R423 ||
+		   rdev->family == CHIP_RV410) {
+		/* r4xx */
+		mem_trcd = (temp & 0xf) + 3;
+		if (mem_trcd > 15)
+			mem_trcd = 15;
+		mem_trp = ((temp >> 8) & 0xf) + 3;
+		if (mem_trp > 15)
+			mem_trp = 15;
+		mem_tras = ((temp >> 12) & 0x1f) + 6;
+		if (mem_tras > 31)
+			mem_tras = 31;
+	} else { /* RV200, R200 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 12) & 0xf) + 4;
+	}
+	/* convert to FF */
+	trcd_ff.full = dfixed_const(mem_trcd);
+	trp_ff.full = dfixed_const(mem_trp);
+	tras_ff.full = dfixed_const(mem_tras);
+
+	/* Get the CAS latency from the MEM_SDRAM_MODE_REG register. */
+	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+	data = (temp & (7 << 20)) >> 20;
+	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_RS480) /* don't think rs400 */
+			tcas_ff = memtcas_rs480_ff[data];
+		else
+			tcas_ff = memtcas_ff[data];
+	} else {
+		tcas_ff = memtcas2_ff[data];
+	}
+
+	if (rdev->family == CHIP_RS400 ||
+	    rdev->family == CHIP_RS480) {
+		/* extra CAS latency stored in bits 23-25, 0-4 clocks */
+		data = (temp >> 23) & 0x7;
+		if (data < 5)
+			tcas_ff.full += dfixed_const(data);
+	}
+
+	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+		/* on the R300, Tcas is included in Trbs.
+		 */
+		temp = RREG32(RADEON_MEM_CNTL);
+		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+		if (data == 1) {
+			if (R300_MEM_USE_CD_CH_ONLY & temp) {
+				temp = RREG32(R300_MC_IND_INDEX);
+				temp &= ~R300_MC_IND_ADDR_MASK;
+				temp |= R300_MC_READ_CNTL_CD_mcind;
+				WREG32(R300_MC_IND_INDEX, temp);
+				temp = RREG32(R300_MC_IND_DATA);
+				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+			} else {
+				temp = RREG32(R300_MC_READ_CNTL_AB);
+				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+			}
+		} else {
+			temp = RREG32(R300_MC_READ_CNTL_AB);
+			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+		}
+		if (rdev->family == CHIP_RV410 ||
+		    rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423)
+			trbs_ff = memtrbs_r4xx[data];
+		else
+			trbs_ff = memtrbs[data];
+		tcas_ff.full += trbs_ff.full;
+	}
+
+	sclk_eff_ff.full = sclk_ff.full;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		fixed20_12 agpmode_ff;
+		agpmode_ff.full = dfixed_const(radeon_agpmode);
+		temp_ff.full = dfixed_const_666(16);
+		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
+	}
+	/* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+	if (ASIC_IS_R300(rdev)) {
+		sclk_delay_ff.full = dfixed_const(250);
+	} else {
+		if ((rdev->family == CHIP_RV100) ||
+		    rdev->flags & RADEON_IS_IGP) {
+			if (rdev->mc.vram_is_ddr)
+				sclk_delay_ff.full = dfixed_const(41);
+			else
+				sclk_delay_ff.full = dfixed_const(33);
+		} else {
+			if (rdev->mc.vram_width == 128)
+				sclk_delay_ff.full = dfixed_const(57);
+			else
+				sclk_delay_ff.full = dfixed_const(41);
+		}
+	}
+
+	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+	if (rdev->mc.vram_is_ddr) {
+		if (rdev->mc.vram_width == 32) {
+			k1.full = dfixed_const(40);
+			c  = 3;
+		} else {
+			k1.full = dfixed_const(20);
+			c  = 1;
+		}
+	} else {
+		k1.full = dfixed_const(40);
+		c  = 3;
+	}
+
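+	/*
+	 * Worst-case MC latency in memory clocks, as accumulated below:
+	 * (2*Trcd + c*Tcas + 4*Tras + 4*Trp + k1) / Mclk + 4 / Sclk_eff.
+	 */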
+	temp_ff.full = dfixed_const(2);
+	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+	temp_ff.full = dfixed_const(c);
+	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+	temp_ff.full = dfixed_const(4);
+	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
+	mc_latency_mclk.full += k1.full;
+
+	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
+
+	/*
+	  HW cursor time assuming worst case of full size colour cursor.
+	*/
+	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+	temp_ff.full += trcd_ff.full;
+	if (temp_ff.full < tras_ff.full)
+		temp_ff.full = tras_ff.full;
+	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
+
+	temp_ff.full = dfixed_const(cur_size);
+	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
+	/*
+	  Find the total latency for the display data.
+	*/
+	disp_latency_overhead.full = dfixed_const(8);
+	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
+	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+	if (mc_latency_mclk.full > mc_latency_sclk.full)
+		disp_latency.full = mc_latency_mclk.full;
+	else
+		disp_latency.full = mc_latency_sclk.full;
+
+	/* setup Max GRPH_STOP_REQ default value */
+	if (ASIC_IS_RV100(rdev))
+		max_stop_req = 0x5c;
+	else
+		max_stop_req = 0x7c;
+
+	if (mode1) {
+		/*  CRTC1
+		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+		*/
+		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes1));
+		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
+
+		/*
+		  Find the critical point of the display buffer.
+		*/
+		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+		crit_point_ff.full += dfixed_const_half(0);
+
+		critical_point = dfixed_trunc(crit_point_ff);
+
+		if (rdev->disp_priority == 2) {
+			critical_point = 0;
+		}
+
+		/*
+		  The critical point should never be above max_stop_req-4.  Setting
+		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+		*/
+		if (max_stop_req - critical_point < 4)
+			critical_point = 0;
+
+		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
+			critical_point = 0x10;
+		}
+
+		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		temp &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		temp |= RADEON_GRPH_BUFFER_SIZE;
+		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+		/*
+		  Write the result into the register.
+		*/
+		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			/* attempt to program RS400 disp regs correctly ??? */
+			temp = RREG32(RS400_DISP1_REG_CNTL);
+			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DMIF_MEM_CNTL1);
+			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+		}
+#endif
+
+		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL set to %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+	}
+
+	if (mode2) {
+		u32 grph2_cntl;
+		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes2));
+		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
+
+		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+
+		if ((rdev->family == CHIP_RS100) ||
+		    (rdev->family == CHIP_RS200))
+			critical_point2 = 0;
+		else {
+			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+			temp_ff.full = dfixed_const(temp);
+			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
+			if (sclk_ff.full < temp_ff.full)
+				temp_ff.full = sclk_ff.full;
+
+			read_return_rate.full = temp_ff.full;
+
+			if (mode1) {
+				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
+			} else {
+				time_disp1_drop_priority.full = 0;
+			}
+			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+			crit_point_ff.full += dfixed_const_half(0);
+
+			critical_point2 = dfixed_trunc(crit_point_ff);
+
+			if (rdev->disp_priority == 2) {
+				critical_point2 = 0;
+			}
+
+			if (max_stop_req - critical_point2 < 4)
+				critical_point2 = 0;
+		}
+
+		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0 */
+			critical_point2 = 0x10;
+		}
+
+		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+#if 0
+			/* attempt to program RS400 disp2 regs correctly ??? */
+			temp = RREG32(RS400_DISP2_REQ_CNTL1);
+			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DISP2_REQ_CNTL2);
+			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
+			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+		}
+
+		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL set to %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
+}
+
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
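+	/*
+	 * Seed a scratch register with 0xCAFEDEAD, have the CP overwrite
+	 * it with 0xDEADBEEF through the ring, then poll for the new value.
+	 */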
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET0(scratch, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test succeeded in %d usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	if (ring->rptr_save_reg) {
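+		/*
+		 * Point the saved rptr past this write (2 dwords) and
+		 * the IB dispatch below (3 dwords).
+		 */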
+		u32 next_rptr = ring->wptr + 2 + 3;
+		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
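+	/* Pad the rest of the IB with type-2 NOP packets. */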
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto free_ib;
+	}
+	r = 0;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test succeeded in %u usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Shut down the CP. We shouldn't need to do that, but better safe
+	 * than sorry.
+	 */
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(R_000740_CP_CSQ_CNTL, 0);
+
+	/* Save few CRTC registers */
+	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
+	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
+	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
+	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
+		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
+	}
+
+	/* Disable VGA aperture access */
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
+	/* Disable cursor, overlay, crtc */
+	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
+					S_000054_CRTC_DISPLAY_DIS(1));
+	WREG32(R_000050_CRTC_GEN_CNTL,
+			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
+			S_000050_CRTC_DISP_REQ_EN_B(1));
+	WREG32(R_000420_OV0_SCALE_CNTL,
+		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
+	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
+						S_000360_CUR2_LOCK(1));
+		WREG32(R_0003F8_CRTC2_GEN_CNTL,
+			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
+			S_0003F8_CRTC2_DISPLAY_DIS(1) |
+			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
+		WREG32(R_000360_CUR2_OFFSET,
+			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
+	}
+}
+
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Update base address for crtc */
+	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	}
+	/* Restore CRTC registers */
+	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
+	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
+	}
+}
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG8(R_0003C2_GENMO_WT);
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r)
+		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2,
+				upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r100_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
+
+static void r100_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force on some of the blocks */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r100_mc_program(rdev);
+	/* Resume clock */
+	r100_clock_startup(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r100_enable_bm(rdev);
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r100_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r100_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/*
+ * Due to how kexec works, it can leave the hw fully initialised when it
+ * boots the new kernel. However, doing our init sequence with the CP and
+ * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
+ * do some quick sanity checks and restore sane values to avoid this
+ * problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(RADEON_CP_CSQ_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_CSQ_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_RB_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_SCRATCH_UMSK);
+	if (tmp) {
+		WREG32(RADEON_SCRATCH_UMSK, 0);
+	}
+}
+
+int r100_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Register debugfs file specific to this group of asics */
+	r100_debugfs(rdev);
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* sanity check some register to avoid hangs like after kexec */
+	r100_restore_sanity(rdev);
+	/* TODO: disabling VGA should go through the VGA request mechanism */
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+	/* Set asic errata */
+	r100_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize VRAM */
+	r100_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r100_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		/* Something went wrong with accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
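+/*
+ * Registers that lie beyond the directly mapped MMIO aperture are reached
+ * indirectly: write the register offset to RADEON_MM_INDEX, then read or
+ * write the value through RADEON_MM_DATA.  mmio_idx_lock serializes the
+ * two-step sequence against concurrent users.
+ */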
+uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t ret;
+
+	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+	ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+	return ret;
+}
+
+void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+	writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+}
+
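+/*
+ * PIO accessors: registers inside the I/O BAR are accessed directly;
+ * anything beyond it goes through the same MM_INDEX/MM_DATA indirection
+ * as the slow MMIO path above, only over port I/O.
+ */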
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+	if (reg < rdev->rio_mem_size)
+		return ioread32(rdev->rio_mem + reg);
+	else {
+		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+		return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+	}
+}
+
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	if (reg < rdev->rio_mem_size)
+		iowrite32(v, rdev->rio_mem + reg);
+	else {
+		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
new file mode 100644
index 0000000..ad16a92
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define R100_TRACK_MAX_TEXTURE 3
+#define R200_TRACK_MAX_TEXTURE 6
+#define R300_TRACK_MAX_TEXTURE 16
+
+#define R100_MAX_CB 1
+#define R300_MAX_CB 4
+
+/*
+ * CS (command stream) checker state.  These structures shadow the GPU
+ * state a command stream programs (color buffers, depth buffer, textures,
+ * vertex arrays) so the parser can validate offsets and sizes against the
+ * backing buffer objects before submission.
+ */
+struct r100_cs_track_cb {
+	struct radeon_bo	*robj;
+	unsigned		pitch;
+	unsigned		cpp;
+	unsigned		offset;
+};
+
+struct r100_cs_track_array {
+	struct radeon_bo	*robj;
+	unsigned		esize;
+};
+
+struct r100_cs_cube_info {
+	struct radeon_bo	*robj;
+	unsigned		offset;
+	unsigned		width;
+	unsigned		height;
+};
+
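+/*
+ * Compression formats tracked for texture size checks: DXT1 packs a 4x4
+ * texel block into 8 bytes, DXT3/DXT5 ("DXT35") into 16 bytes.
+ */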
+#define R100_TRACK_COMP_NONE   0
+#define R100_TRACK_COMP_DXT1   1
+#define R100_TRACK_COMP_DXT35  2
+
+struct r100_cs_track_texture {
+	struct radeon_bo	*robj;
+	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
+	unsigned		pitch;
+	unsigned		width;
+	unsigned		height;
+	unsigned		num_levels;
+	unsigned		cpp;
+	unsigned		tex_coord_type;
+	unsigned		txdepth;
+	unsigned		width_11;
+	unsigned		height_11;
+	bool			use_pitch;
+	bool			enabled;
+	bool			lookup_disable;
+	bool			roundup_w;
+	bool			roundup_h;
+	unsigned		compress_format;
+};
+
+struct r100_cs_track {
+	unsigned			num_cb;
+	unsigned			num_texture;
+	unsigned			maxy;
+	unsigned			vtx_size;
+	unsigned			vap_vf_cntl;
+	unsigned			vap_alt_nverts;
+	unsigned			immd_dwords;
+	unsigned			num_arrays;
+	unsigned			max_indx;
+	unsigned			color_channel_mask;
+	struct r100_cs_track_array	arrays[16];
+	struct r100_cs_track_cb	cb[R300_MAX_CB];
+	struct r100_cs_track_cb	zb;
+	struct r100_cs_track_cb	aa;
+	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
+	bool				z_enabled;
+	bool				separate_cube;
+	bool				zb_cb_clear;
+	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
+};
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
+
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg);
+
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg);
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
new file mode 100644
index 0000000..f0f8ee6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -0,0 +1,869 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R100D_H__
+#define __R100D_H__
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
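+
+/*
+ * A type-0 packet header (bits [31:30] select the packet type: 00/01/10/11
+ * for PACKET0/1/2/3) is followed by n + 1 data dwords written to
+ * consecutive registers starting at reg.  Illustrative sketch (not part of
+ * the original file), using the driver's radeon_ring_write() helper to
+ * emit a single register write:
+ *
+ *	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ *	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN);
+ */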
+
+/* Registers */
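+/*
+ * Naming convention for the accessors below: S_xxx(v) shifts a value into
+ * the named field, G_xxx(reg) extracts the field from a register value,
+ * and C_xxx is the AND-mask that clears the field.  A typical
+ * read-modify-write (illustrative only; "mode" is a placeholder):
+ *
+ *	tmp = RREG32(R_000740_CP_CSQ_CNTL);
+ *	tmp &= C_000740_CSQ_MODE;
+ *	tmp |= S_000740_CSQ_MODE(mode);
+ *	WREG32(R_000740_CP_CSQ_CNTL, tmp);
+ */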
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_SE(x)                    (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_SE(x)                    (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_SE                       0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define R_000030_BUS_CNTL                            0x000030
+#define   S_000030_BUS_DBL_RESYNC(x)                   (((x) & 0x1) << 0)
+#define   G_000030_BUS_DBL_RESYNC(x)                   (((x) >> 0) & 0x1)
+#define   C_000030_BUS_DBL_RESYNC                      0xFFFFFFFE
+#define   S_000030_BUS_MSTR_RESET(x)                   (((x) & 0x1) << 1)
+#define   G_000030_BUS_MSTR_RESET(x)                   (((x) >> 1) & 0x1)
+#define   C_000030_BUS_MSTR_RESET                      0xFFFFFFFD
+#define   S_000030_BUS_FLUSH_BUF(x)                    (((x) & 0x1) << 2)
+#define   G_000030_BUS_FLUSH_BUF(x)                    (((x) >> 2) & 0x1)
+#define   C_000030_BUS_FLUSH_BUF                       0xFFFFFFFB
+#define   S_000030_BUS_STOP_REQ_DIS(x)                 (((x) & 0x1) << 3)
+#define   G_000030_BUS_STOP_REQ_DIS(x)                 (((x) >> 3) & 0x1)
+#define   C_000030_BUS_STOP_REQ_DIS                    0xFFFFFFF7
+#define   S_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) & 0x1) << 4)
+#define   G_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) >> 4) & 0x1)
+#define   C_000030_BUS_PM4_READ_COMBINE_EN             0xFFFFFFEF
+#define   S_000030_BUS_WRT_COMBINE_EN(x)               (((x) & 0x1) << 5)
+#define   G_000030_BUS_WRT_COMBINE_EN(x)               (((x) >> 5) & 0x1)
+#define   C_000030_BUS_WRT_COMBINE_EN                  0xFFFFFFDF
+#define   S_000030_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 6)
+#define   G_000030_BUS_MASTER_DIS(x)                   (((x) >> 6) & 0x1)
+#define   C_000030_BUS_MASTER_DIS                      0xFFFFFFBF
+#define   S_000030_BIOS_ROM_WRT_EN(x)                  (((x) & 0x1) << 7)
+#define   G_000030_BIOS_ROM_WRT_EN(x)                  (((x) >> 7) & 0x1)
+#define   C_000030_BIOS_ROM_WRT_EN                     0xFFFFFF7F
+#define   S_000030_BM_DAC_CRIPPLE(x)                   (((x) & 0x1) << 8)
+#define   G_000030_BM_DAC_CRIPPLE(x)                   (((x) >> 8) & 0x1)
+#define   C_000030_BM_DAC_CRIPPLE                      0xFFFFFEFF
+#define   S_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) & 0x1) << 9)
+#define   G_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) >> 9) & 0x1)
+#define   C_000030_BUS_NON_PM4_READ_COMBINE_EN         0xFFFFFDFF
+#define   S_000030_BUS_XFERD_DISCARD_EN(x)             (((x) & 0x1) << 10)
+#define   G_000030_BUS_XFERD_DISCARD_EN(x)             (((x) >> 10) & 0x1)
+#define   C_000030_BUS_XFERD_DISCARD_EN                0xFFFFFBFF
+#define   S_000030_BUS_SGL_READ_DISABLE(x)             (((x) & 0x1) << 11)
+#define   G_000030_BUS_SGL_READ_DISABLE(x)             (((x) >> 11) & 0x1)
+#define   C_000030_BUS_SGL_READ_DISABLE                0xFFFFF7FF
+#define   S_000030_BIOS_DIS_ROM(x)                     (((x) & 0x1) << 12)
+#define   G_000030_BIOS_DIS_ROM(x)                     (((x) >> 12) & 0x1)
+#define   C_000030_BIOS_DIS_ROM                        0xFFFFEFFF
+#define   S_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) & 0x1) << 13)
+#define   G_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) >> 13) & 0x1)
+#define   C_000030_BUS_PCI_READ_RETRY_EN               0xFFFFDFFF
+#define   S_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) & 0x1) << 14)
+#define   G_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) >> 14) & 0x1)
+#define   C_000030_BUS_AGP_AD_STEPPING_EN              0xFFFFBFFF
+#define   S_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) & 0x1) << 15)
+#define   G_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) >> 15) & 0x1)
+#define   C_000030_BUS_PCI_WRT_RETRY_EN                0xFFFF7FFF
+#define   S_000030_BUS_RETRY_WS(x)                     (((x) & 0xF) << 16)
+#define   G_000030_BUS_RETRY_WS(x)                     (((x) >> 16) & 0xF)
+#define   C_000030_BUS_RETRY_WS                        0xFFF0FFFF
+#define   S_000030_BUS_MSTR_RD_MULT(x)                 (((x) & 0x1) << 20)
+#define   G_000030_BUS_MSTR_RD_MULT(x)                 (((x) >> 20) & 0x1)
+#define   C_000030_BUS_MSTR_RD_MULT                    0xFFEFFFFF
+#define   S_000030_BUS_MSTR_RD_LINE(x)                 (((x) & 0x1) << 21)
+#define   G_000030_BUS_MSTR_RD_LINE(x)                 (((x) >> 21) & 0x1)
+#define   C_000030_BUS_MSTR_RD_LINE                    0xFFDFFFFF
+#define   S_000030_BUS_SUSPEND(x)                      (((x) & 0x1) << 22)
+#define   G_000030_BUS_SUSPEND(x)                      (((x) >> 22) & 0x1)
+#define   C_000030_BUS_SUSPEND                         0xFFBFFFFF
+#define   S_000030_LAT_16X(x)                          (((x) & 0x1) << 23)
+#define   G_000030_LAT_16X(x)                          (((x) >> 23) & 0x1)
+#define   C_000030_LAT_16X                             0xFF7FFFFF
+#define   S_000030_BUS_RD_DISCARD_EN(x)                (((x) & 0x1) << 24)
+#define   G_000030_BUS_RD_DISCARD_EN(x)                (((x) >> 24) & 0x1)
+#define   C_000030_BUS_RD_DISCARD_EN                   0xFEFFFFFF
+#define   S_000030_ENFRCWRDY(x)                        (((x) & 0x1) << 25)
+#define   G_000030_ENFRCWRDY(x)                        (((x) >> 25) & 0x1)
+#define   C_000030_ENFRCWRDY                           0xFDFFFFFF
+#define   S_000030_BUS_MSTR_WS(x)                      (((x) & 0x1) << 26)
+#define   G_000030_BUS_MSTR_WS(x)                      (((x) >> 26) & 0x1)
+#define   C_000030_BUS_MSTR_WS                         0xFBFFFFFF
+#define   S_000030_BUS_PARKING_DIS(x)                  (((x) & 0x1) << 27)
+#define   G_000030_BUS_PARKING_DIS(x)                  (((x) >> 27) & 0x1)
+#define   C_000030_BUS_PARKING_DIS                     0xF7FFFFFF
+#define   S_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) & 0x1) << 28)
+#define   G_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) >> 28) & 0x1)
+#define   C_000030_BUS_MSTR_DISCONNECT_EN              0xEFFFFFFF
+#define   S_000030_SERR_EN(x)                          (((x) & 0x1) << 29)
+#define   G_000030_SERR_EN(x)                          (((x) >> 29) & 0x1)
+#define   C_000030_SERR_EN                             0xDFFFFFFF
+#define   S_000030_BUS_READ_BURST(x)                   (((x) & 0x1) << 30)
+#define   G_000030_BUS_READ_BURST(x)                   (((x) >> 30) & 0x1)
+#define   C_000030_BUS_READ_BURST                      0xBFFFFFFF
+#define   S_000030_BUS_RDY_READ_DLY(x)                 (((x) & 0x1) << 31)
+#define   G_000030_BUS_RDY_READ_DLY(x)                 (((x) >> 31) & 0x1)
+#define   C_000030_BUS_RDY_READ_DLY                    0x7FFFFFFF
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_CRTC_VBLANK(x)                      (((x) & 0x1) << 0)
+#define   G_000040_CRTC_VBLANK(x)                      (((x) >> 0) & 0x1)
+#define   C_000040_CRTC_VBLANK                         0xFFFFFFFE
+#define   S_000040_CRTC_VLINE(x)                       (((x) & 0x1) << 1)
+#define   G_000040_CRTC_VLINE(x)                       (((x) >> 1) & 0x1)
+#define   C_000040_CRTC_VLINE                          0xFFFFFFFD
+#define   S_000040_CRTC_VSYNC(x)                       (((x) & 0x1) << 2)
+#define   G_000040_CRTC_VSYNC(x)                       (((x) >> 2) & 0x1)
+#define   C_000040_CRTC_VSYNC                          0xFFFFFFFB
+#define   S_000040_SNAPSHOT(x)                         (((x) & 0x1) << 3)
+#define   G_000040_SNAPSHOT(x)                         (((x) >> 3) & 0x1)
+#define   C_000040_SNAPSHOT                            0xFFFFFFF7
+#define   S_000040_FP_DETECT(x)                        (((x) & 0x1) << 4)
+#define   G_000040_FP_DETECT(x)                        (((x) >> 4) & 0x1)
+#define   C_000040_FP_DETECT                           0xFFFFFFEF
+#define   S_000040_CRTC2_VLINE(x)                      (((x) & 0x1) << 5)
+#define   G_000040_CRTC2_VLINE(x)                      (((x) >> 5) & 0x1)
+#define   C_000040_CRTC2_VLINE                         0xFFFFFFDF
+#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
+#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
+#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
+#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
+#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
+#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
+#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
+#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
+#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
+#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
+#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
+#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
+#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
+#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
+#define   C_000040_FP2_DETECT                          0xFFFFFBFF
+#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
+#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
+#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_CRTC_VBLANK_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT                    0xFFFFFFFE
+#define   S_000044_CRTC_VBLANK_STAT_AK(x)              (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT_AK(x)              (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT_AK                 0xFFFFFFFE
+#define   S_000044_CRTC_VLINE_STAT(x)                  (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT(x)                  (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT                     0xFFFFFFFD
+#define   S_000044_CRTC_VLINE_STAT_AK(x)               (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT_AK(x)               (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT_AK                  0xFFFFFFFD
+#define   S_000044_CRTC_VSYNC_STAT(x)                  (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT(x)                  (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT                     0xFFFFFFFB
+#define   S_000044_CRTC_VSYNC_STAT_AK(x)               (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT_AK(x)               (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT_AK                  0xFFFFFFFB
+#define   S_000044_SNAPSHOT_STAT(x)                    (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT(x)                    (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT                       0xFFFFFFF7
+#define   S_000044_SNAPSHOT_STAT_AK(x)                 (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT_AK(x)                 (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT_AK                    0xFFFFFFF7
+#define   S_000044_FP_DETECT_STAT(x)                   (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT(x)                   (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT                      0xFFFFFFEF
+#define   S_000044_FP_DETECT_STAT_AK(x)                (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT_AK(x)                (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT_AK                   0xFFFFFFEF
+#define   S_000044_CRTC2_VLINE_STAT(x)                 (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT(x)                 (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT                    0xFFFFFFDF
+#define   S_000044_CRTC2_VLINE_STAT_AK(x)              (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT_AK(x)              (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT_AK                 0xFFFFFFDF
+#define   S_000044_CRTC2_VSYNC_STAT(x)                 (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT(x)                 (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT                    0xFFFFFFBF
+#define   S_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT_AK                 0xFFFFFFBF
+#define   S_000044_SNAPSHOT2_STAT(x)                   (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT(x)                   (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT                      0xFFFFFF7F
+#define   S_000044_SNAPSHOT2_STAT_AK(x)                (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT_AK(x)                (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT_AK                   0xFFFFFF7F
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_CRTC2_VBLANK_STAT(x)                (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT(x)                (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT                   0xFFFFFDFF
+#define   S_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT_AK                0xFFFFFDFF
+#define   S_000044_FP2_DETECT_STAT(x)                  (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT(x)                  (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT                     0xFFFFFBFF
+#define   S_000044_FP2_DETECT_STAT_AK(x)               (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT_AK(x)               (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT_AK                  0xFFFFFBFF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT          0xFFFFF7FF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK       0xFFFFF7FF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH0_INT_AK(x)                 (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT_AK(x)                 (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT_AK                    0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH1_INT_AK(x)                 (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT_AK(x)                 (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT_AK                    0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH2_INT_AK(x)                 (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT_AK(x)                 (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT_AK                    0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_DMA_VIPH3_INT_AK(x)                 (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT_AK(x)                 (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT_AK                    0xFFFF7FFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_I2C_INT_AK(x)                       (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT_AK(x)                       (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT_AK                          0xFFFDFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_GUI_IDLE_STAT_AK(x)                 (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT_AK(x)                 (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT_AK                    0xFFF7FFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_AK(x)                        (((x) & 0x1) << 25)
+#define   G_000044_SW_INT_AK(x)                        (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT_AK                           0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_GEYSERVILLE_STAT(x)                 (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT(x)                 (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT                    0xF7FFFFFF
+#define   S_000044_GEYSERVILLE_STAT_AK(x)              (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT_AK(x)              (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT_AK                 0xF7FFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_STAT            0xEFFFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_AK              0xEFFFFFFF
+#define   S_000044_DVI_I2C_INT_STAT(x)                 (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_STAT(x)                 (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_STAT                    0xDFFFFFFF
+#define   S_000044_DVI_I2C_INT_AK(x)                   (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_AK(x)                   (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_AK                      0xDFFFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_GUIDMA_AK(x)                        (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_AK(x)                        (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_AK                           0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define   S_000044_VIDDMA_AK(x)                        (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_AK(x)                        (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_AK                           0x7FFFFFFF
+#define R_000050_CRTC_GEN_CNTL                       0x000050
+#define   S_000050_CRTC_DBL_SCAN_EN(x)                 (((x) & 0x1) << 0)
+#define   G_000050_CRTC_DBL_SCAN_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_000050_CRTC_DBL_SCAN_EN                    0xFFFFFFFE
+#define   S_000050_CRTC_INTERLACE_EN(x)                (((x) & 0x1) << 1)
+#define   G_000050_CRTC_INTERLACE_EN(x)                (((x) >> 1) & 0x1)
+#define   C_000050_CRTC_INTERLACE_EN                   0xFFFFFFFD
+#define   S_000050_CRTC_C_SYNC_EN(x)                   (((x) & 0x1) << 4)
+#define   G_000050_CRTC_C_SYNC_EN(x)                   (((x) >> 4) & 0x1)
+#define   C_000050_CRTC_C_SYNC_EN                      0xFFFFFFEF
+#define   S_000050_CRTC_PIX_WIDTH(x)                   (((x) & 0xF) << 8)
+#define   G_000050_CRTC_PIX_WIDTH(x)                   (((x) >> 8) & 0xF)
+#define   C_000050_CRTC_PIX_WIDTH                      0xFFFFF0FF
+#define   S_000050_CRTC_ICON_EN(x)                     (((x) & 0x1) << 15)
+#define   G_000050_CRTC_ICON_EN(x)                     (((x) >> 15) & 0x1)
+#define   C_000050_CRTC_ICON_EN                        0xFFFF7FFF
+#define   S_000050_CRTC_CUR_EN(x)                      (((x) & 0x1) << 16)
+#define   G_000050_CRTC_CUR_EN(x)                      (((x) >> 16) & 0x1)
+#define   C_000050_CRTC_CUR_EN                         0xFFFEFFFF
+#define   S_000050_CRTC_VSTAT_MODE(x)                  (((x) & 0x3) << 17)
+#define   G_000050_CRTC_VSTAT_MODE(x)                  (((x) >> 17) & 0x3)
+#define   C_000050_CRTC_VSTAT_MODE                     0xFFF9FFFF
+#define   S_000050_CRTC_CUR_MODE(x)                    (((x) & 0x7) << 20)
+#define   G_000050_CRTC_CUR_MODE(x)                    (((x) >> 20) & 0x7)
+#define   C_000050_CRTC_CUR_MODE                       0xFF8FFFFF
+#define   S_000050_CRTC_EXT_DISP_EN(x)                 (((x) & 0x1) << 24)
+#define   G_000050_CRTC_EXT_DISP_EN(x)                 (((x) >> 24) & 0x1)
+#define   C_000050_CRTC_EXT_DISP_EN                    0xFEFFFFFF
+#define   S_000050_CRTC_EN(x)                          (((x) & 0x1) << 25)
+#define   G_000050_CRTC_EN(x)                          (((x) >> 25) & 0x1)
+#define   C_000050_CRTC_EN                             0xFDFFFFFF
+#define   S_000050_CRTC_DISP_REQ_EN_B(x)               (((x) & 0x1) << 26)
+#define   G_000050_CRTC_DISP_REQ_EN_B(x)               (((x) >> 26) & 0x1)
+#define   C_000050_CRTC_DISP_REQ_EN_B                  0xFBFFFFFF
+#define R_000054_CRTC_EXT_CNTL                       0x000054
+#define   S_000054_CRTC_VGA_XOVERSCAN(x)               (((x) & 0x1) << 0)
+#define   G_000054_CRTC_VGA_XOVERSCAN(x)               (((x) >> 0) & 0x1)
+#define   C_000054_CRTC_VGA_XOVERSCAN                  0xFFFFFFFE
+#define   S_000054_VGA_BLINK_RATE(x)                   (((x) & 0x3) << 1)
+#define   G_000054_VGA_BLINK_RATE(x)                   (((x) >> 1) & 0x3)
+#define   C_000054_VGA_BLINK_RATE                      0xFFFFFFF9
+#define   S_000054_VGA_ATI_LINEAR(x)                   (((x) & 0x1) << 3)
+#define   G_000054_VGA_ATI_LINEAR(x)                   (((x) >> 3) & 0x1)
+#define   C_000054_VGA_ATI_LINEAR                      0xFFFFFFF7
+#define   S_000054_VGA_128KAP_PAGING(x)                (((x) & 0x1) << 4)
+#define   G_000054_VGA_128KAP_PAGING(x)                (((x) >> 4) & 0x1)
+#define   C_000054_VGA_128KAP_PAGING                   0xFFFFFFEF
+#define   S_000054_VGA_TEXT_132(x)                     (((x) & 0x1) << 5)
+#define   G_000054_VGA_TEXT_132(x)                     (((x) >> 5) & 0x1)
+#define   C_000054_VGA_TEXT_132                        0xFFFFFFDF
+#define   S_000054_VGA_XCRT_CNT_EN(x)                  (((x) & 0x1) << 6)
+#define   G_000054_VGA_XCRT_CNT_EN(x)                  (((x) >> 6) & 0x1)
+#define   C_000054_VGA_XCRT_CNT_EN                     0xFFFFFFBF
+#define   S_000054_CRTC_HSYNC_DIS(x)                   (((x) & 0x1) << 8)
+#define   G_000054_CRTC_HSYNC_DIS(x)                   (((x) >> 8) & 0x1)
+#define   C_000054_CRTC_HSYNC_DIS                      0xFFFFFEFF
+#define   S_000054_CRTC_VSYNC_DIS(x)                   (((x) & 0x1) << 9)
+#define   G_000054_CRTC_VSYNC_DIS(x)                   (((x) >> 9) & 0x1)
+#define   C_000054_CRTC_VSYNC_DIS                      0xFFFFFDFF
+#define   S_000054_CRTC_DISPLAY_DIS(x)                 (((x) & 0x1) << 10)
+#define   G_000054_CRTC_DISPLAY_DIS(x)                 (((x) >> 10) & 0x1)
+#define   C_000054_CRTC_DISPLAY_DIS                    0xFFFFFBFF
+#define   S_000054_CRTC_SYNC_TRISTATE(x)               (((x) & 0x1) << 11)
+#define   G_000054_CRTC_SYNC_TRISTATE(x)               (((x) >> 11) & 0x1)
+#define   C_000054_CRTC_SYNC_TRISTATE                  0xFFFFF7FF
+#define   S_000054_CRTC_HSYNC_TRISTATE(x)              (((x) & 0x1) << 12)
+#define   G_000054_CRTC_HSYNC_TRISTATE(x)              (((x) >> 12) & 0x1)
+#define   C_000054_CRTC_HSYNC_TRISTATE                 0xFFFFEFFF
+#define   S_000054_CRTC_VSYNC_TRISTATE(x)              (((x) & 0x1) << 13)
+#define   G_000054_CRTC_VSYNC_TRISTATE(x)              (((x) >> 13) & 0x1)
+#define   C_000054_CRTC_VSYNC_TRISTATE                 0xFFFFDFFF
+#define   S_000054_CRT_ON(x)                           (((x) & 0x1) << 15)
+#define   G_000054_CRT_ON(x)                           (((x) >> 15) & 0x1)
+#define   C_000054_CRT_ON                              0xFFFF7FFF
+#define   S_000054_VGA_CUR_B_TEST(x)                   (((x) & 0x1) << 17)
+#define   G_000054_VGA_CUR_B_TEST(x)                   (((x) >> 17) & 0x1)
+#define   C_000054_VGA_CUR_B_TEST                      0xFFFDFFFF
+#define   S_000054_VGA_PACK_DIS(x)                     (((x) & 0x1) << 18)
+#define   G_000054_VGA_PACK_DIS(x)                     (((x) >> 18) & 0x1)
+#define   C_000054_VGA_PACK_DIS                        0xFFFBFFFF
+#define   S_000054_VGA_MEM_PS_EN(x)                    (((x) & 0x1) << 19)
+#define   G_000054_VGA_MEM_PS_EN(x)                    (((x) >> 19) & 0x1)
+#define   C_000054_VGA_MEM_PS_EN                       0xFFF7FFFF
+#define   S_000054_VCRTC_IDX_MASTER(x)                 (((x) & 0x7F) << 24)
+#define   G_000054_VCRTC_IDX_MASTER(x)                 (((x) >> 24) & 0x7F)
+#define   C_000054_VCRTC_IDX_MASTER                    0x80FFFFFF
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_00023C_DISPLAY_BASE_ADDR                   0x00023C
+#define   S_00023C_DISPLAY_BASE_ADDR(x)                (((x) & 0xFFFFFFFF) << 0)
+#define   G_00023C_DISPLAY_BASE_ADDR(x)                (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00023C_DISPLAY_BASE_ADDR                   0x00000000
+#define R_000260_CUR_OFFSET                          0x000260
+#define   S_000260_CUR_OFFSET(x)                       (((x) & 0x7FFFFFF) << 0)
+#define   G_000260_CUR_OFFSET(x)                       (((x) >> 0) & 0x7FFFFFF)
+#define   C_000260_CUR_OFFSET                          0xF8000000
+#define   S_000260_CUR_LOCK(x)                         (((x) & 0x1) << 31)
+#define   G_000260_CUR_LOCK(x)                         (((x) >> 31) & 0x1)
+#define   C_000260_CUR_LOCK                            0x7FFFFFFF
+#define R_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00033C
+#define   S_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00000000
+#define R_000360_CUR2_OFFSET                         0x000360
+#define   S_000360_CUR2_OFFSET(x)                      (((x) & 0x7FFFFFF) << 0)
+#define   G_000360_CUR2_OFFSET(x)                      (((x) >> 0) & 0x7FFFFFF)
+#define   C_000360_CUR2_OFFSET                         0xF8000000
+#define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
+#define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
+#define   C_000360_CUR2_LOCK                           0x7FFFFFFF
+#define R_0003C2_GENMO_WT                            0x0003C2
+#define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
+#define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
+#define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
+#define   S_0003C2_VGA_RAM_EN(x)                       (((x) & 0x1) << 1)
+#define   G_0003C2_VGA_RAM_EN(x)                       (((x) >> 1) & 0x1)
+#define   C_0003C2_VGA_RAM_EN                          0xFD
+#define   S_0003C2_VGA_CKSEL(x)                        (((x) & 0x3) << 2)
+#define   G_0003C2_VGA_CKSEL(x)                        (((x) >> 2) & 0x3)
+#define   C_0003C2_VGA_CKSEL                           0xF3
+#define   S_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) & 0x1) << 5)
+#define   G_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) >> 5) & 0x1)
+#define   C_0003C2_ODD_EVEN_MD_PGSEL                   0xDF
+#define   S_0003C2_VGA_HSYNC_POL(x)                    (((x) & 0x1) << 6)
+#define   G_0003C2_VGA_HSYNC_POL(x)                    (((x) >> 6) & 0x1)
+#define   C_0003C2_VGA_HSYNC_POL                       0xBF
+#define   S_0003C2_VGA_VSYNC_POL(x)                    (((x) & 0x1) << 7)
+#define   G_0003C2_VGA_VSYNC_POL(x)                    (((x) >> 7) & 0x1)
+#define   C_0003C2_VGA_VSYNC_POL                       0x7F
+#define R_0003F8_CRTC2_GEN_CNTL                      0x0003F8
+#define   S_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) & 0x1) << 0)
+#define   G_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) >> 0) & 0x1)
+#define   C_0003F8_CRTC2_DBL_SCAN_EN                   0xFFFFFFFE
+#define   S_0003F8_CRTC2_INTERLACE_EN(x)               (((x) & 0x1) << 1)
+#define   G_0003F8_CRTC2_INTERLACE_EN(x)               (((x) >> 1) & 0x1)
+#define   C_0003F8_CRTC2_INTERLACE_EN                  0xFFFFFFFD
+#define   S_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) & 0x1) << 4)
+#define   G_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) >> 4) & 0x1)
+#define   C_0003F8_CRTC2_SYNC_TRISTATE                 0xFFFFFFEF
+#define   S_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) & 0x1) << 5)
+#define   G_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) >> 5) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_TRISTATE                0xFFFFFFDF
+#define   S_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) & 0x1) << 6)
+#define   G_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) >> 6) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_TRISTATE                0xFFFFFFBF
+#define   S_0003F8_CRT2_ON(x)                          (((x) & 0x1) << 7)
+#define   G_0003F8_CRT2_ON(x)                          (((x) >> 7) & 0x1)
+#define   C_0003F8_CRT2_ON                             0xFFFFFF7F
+#define   S_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) & 0xF) << 8)
+#define   G_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) >> 8) & 0xF)
+#define   C_0003F8_CRTC2_PIX_WIDTH                     0xFFFFF0FF
+#define   S_0003F8_CRTC2_ICON_EN(x)                    (((x) & 0x1) << 15)
+#define   G_0003F8_CRTC2_ICON_EN(x)                    (((x) >> 15) & 0x1)
+#define   C_0003F8_CRTC2_ICON_EN                       0xFFFF7FFF
+#define   S_0003F8_CRTC2_CUR_EN(x)                     (((x) & 0x1) << 16)
+#define   G_0003F8_CRTC2_CUR_EN(x)                     (((x) >> 16) & 0x1)
+#define   C_0003F8_CRTC2_CUR_EN                        0xFFFEFFFF
+#define   S_0003F8_CRTC2_CUR_MODE(x)                   (((x) & 0x7) << 20)
+#define   G_0003F8_CRTC2_CUR_MODE(x)                   (((x) >> 20) & 0x7)
+#define   C_0003F8_CRTC2_CUR_MODE                      0xFF8FFFFF
+#define   S_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) & 0x1) << 23)
+#define   G_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) >> 23) & 0x1)
+#define   C_0003F8_CRTC2_DISPLAY_DIS                   0xFF7FFFFF
+#define   S_0003F8_CRTC2_EN(x)                         (((x) & 0x1) << 25)
+#define   G_0003F8_CRTC2_EN(x)                         (((x) >> 25) & 0x1)
+#define   C_0003F8_CRTC2_EN                            0xFDFFFFFF
+#define   S_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) & 0x1) << 26)
+#define   G_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) >> 26) & 0x1)
+#define   C_0003F8_CRTC2_DISP_REQ_EN_B                 0xFBFFFFFF
+#define   S_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) & 0x1) << 27)
+#define   G_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) >> 27) & 0x1)
+#define   C_0003F8_CRTC2_C_SYNC_EN                     0xF7FFFFFF
+#define   S_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) & 0x1) << 28)
+#define   G_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) >> 28) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_DIS                     0xEFFFFFFF
+#define   S_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) & 0x1) << 29)
+#define   G_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) >> 29) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_DIS                     0xDFFFFFFF
+#define R_000420_OV0_SCALE_CNTL                      0x000420
+#define   S_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) & 0x1) << 1)
+#define   G_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) >> 1) & 0x1)
+#define   C_000420_OV0_NO_READ_BEHIND_SCAN             0xFFFFFFFD
+#define   S_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) & 0x1) << 2)
+#define   G_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) >> 2) & 0x1)
+#define   C_000420_OV0_HORZ_PICK_NEAREST               0xFFFFFFFB
+#define   S_000420_OV0_VERT_PICK_NEAREST(x)            (((x) & 0x1) << 3)
+#define   G_000420_OV0_VERT_PICK_NEAREST(x)            (((x) >> 3) & 0x1)
+#define   C_000420_OV0_VERT_PICK_NEAREST               0xFFFFFFF7
+#define   S_000420_OV0_SIGNED_UV(x)                    (((x) & 0x1) << 4)
+#define   G_000420_OV0_SIGNED_UV(x)                    (((x) >> 4) & 0x1)
+#define   C_000420_OV0_SIGNED_UV                       0xFFFFFFEF
+#define   S_000420_OV0_GAMMA_SEL(x)                    (((x) & 0x7) << 5)
+#define   G_000420_OV0_GAMMA_SEL(x)                    (((x) >> 5) & 0x7)
+#define   C_000420_OV0_GAMMA_SEL                       0xFFFFFF1F
+#define   S_000420_OV0_SURFACE_FORMAT(x)               (((x) & 0xF) << 8)
+#define   G_000420_OV0_SURFACE_FORMAT(x)               (((x) >> 8) & 0xF)
+#define   C_000420_OV0_SURFACE_FORMAT                  0xFFFFF0FF
+#define   S_000420_OV0_ADAPTIVE_DEINT(x)               (((x) & 0x1) << 12)
+#define   G_000420_OV0_ADAPTIVE_DEINT(x)               (((x) >> 12) & 0x1)
+#define   C_000420_OV0_ADAPTIVE_DEINT                  0xFFFFEFFF
+#define   S_000420_OV0_CRTC_SEL(x)                     (((x) & 0x1) << 14)
+#define   G_000420_OV0_CRTC_SEL(x)                     (((x) >> 14) & 0x1)
+#define   C_000420_OV0_CRTC_SEL                        0xFFFFBFFF
+#define   S_000420_OV0_BURST_PER_PLANE(x)              (((x) & 0x7F) << 16)
+#define   G_000420_OV0_BURST_PER_PLANE(x)              (((x) >> 16) & 0x7F)
+#define   C_000420_OV0_BURST_PER_PLANE                 0xFF80FFFF
+#define   S_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) & 0x1) << 24)
+#define   G_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) >> 24) & 0x1)
+#define   C_000420_OV0_DOUBLE_BUFFER_REGS              0xFEFFFFFF
+#define   S_000420_OV0_BANDWIDTH(x)                    (((x) & 0x1) << 26)
+#define   G_000420_OV0_BANDWIDTH(x)                    (((x) >> 26) & 0x1)
+#define   C_000420_OV0_BANDWIDTH                       0xFBFFFFFF
+#define   S_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) & 0x1) << 28)
+#define   G_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) >> 28) & 0x1)
+#define   C_000420_OV0_LIN_TRANS_BYPASS                0xEFFFFFFF
+#define   S_000420_OV0_INT_EMU(x)                      (((x) & 0x1) << 29)
+#define   G_000420_OV0_INT_EMU(x)                      (((x) >> 29) & 0x1)
+#define   C_000420_OV0_INT_EMU                         0xDFFFFFFF
+#define   S_000420_OV0_OVERLAY_EN(x)                   (((x) & 0x1) << 30)
+#define   G_000420_OV0_OVERLAY_EN(x)                   (((x) >> 30) & 0x1)
+#define   C_000420_OV0_OVERLAY_EN                      0xBFFFFFFF
+#define   S_000420_OV0_SOFT_RESET(x)                   (((x) & 0x1) << 31)
+#define   G_000420_OV0_SOFT_RESET(x)                   (((x) >> 31) & 0x1)
+#define   C_000420_OV0_SOFT_RESET                      0x7FFFFFFF
+#define R_00070C_CP_RB_RPTR_ADDR                     0x00070C
+#define   S_00070C_RB_RPTR_SWAP(x)                     (((x) & 0x3) << 0)
+#define   G_00070C_RB_RPTR_SWAP(x)                     (((x) >> 0) & 0x3)
+#define   C_00070C_RB_RPTR_SWAP                        0xFFFFFFFC
+#define   S_00070C_RB_RPTR_ADDR(x)                     (((x) & 0x3FFFFFFF) << 2)
+#define   G_00070C_RB_RPTR_ADDR(x)                     (((x) >> 2) & 0x3FFFFFFF)
+#define   C_00070C_RB_RPTR_ADDR                        0x00000003
+#define R_000740_CP_CSQ_CNTL                         0x000740
+#define   S_000740_CSQ_CNT_PRIMARY(x)                  (((x) & 0xFF) << 0)
+#define   G_000740_CSQ_CNT_PRIMARY(x)                  (((x) >> 0) & 0xFF)
+#define   C_000740_CSQ_CNT_PRIMARY                     0xFFFFFF00
+#define   S_000740_CSQ_CNT_INDIRECT(x)                 (((x) & 0xFF) << 8)
+#define   G_000740_CSQ_CNT_INDIRECT(x)                 (((x) >> 8) & 0xFF)
+#define   C_000740_CSQ_CNT_INDIRECT                    0xFFFF00FF
+#define   S_000740_CSQ_MODE(x)                         (((x) & 0xF) << 28)
+#define   G_000740_CSQ_MODE(x)                         (((x) >> 28) & 0xF)
+#define   C_000740_CSQ_MODE                            0x0FFFFFFF
+#define R_000770_SCRATCH_UMSK                        0x000770
+#define   S_000770_SCRATCH_UMSK(x)                     (((x) & 0x3F) << 0)
+#define   G_000770_SCRATCH_UMSK(x)                     (((x) >> 0) & 0x3F)
+#define   C_000770_SCRATCH_UMSK                        0xFFFFFFC0
+#define   S_000770_SCRATCH_SWAP(x)                     (((x) & 0x3) << 16)
+#define   G_000770_SCRATCH_SWAP(x)                     (((x) >> 16) & 0x3)
+#define   C_000770_SCRATCH_SWAP                        0xFFFCFFFF
+#define R_000774_SCRATCH_ADDR                        0x000774
+#define   S_000774_SCRATCH_ADDR(x)                     (((x) & 0x7FFFFFF) << 5)
+#define   G_000774_SCRATCH_ADDR(x)                     (((x) >> 5) & 0x7FFFFFF)
+#define   C_000774_SCRATCH_ADDR                        0x0000001F
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_SE_BUSY(x)                          (((x) & 0x1) << 20)
+#define   G_000E40_SE_BUSY(x)                          (((x) >> 20) & 0x1)
+#define   C_000E40_SE_BUSY                             0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
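+/* In the S_xxx/G_xxx/C_xxx helpers above, S_ shifts a field value into
+ * position, G_ extracts it from a register word, and C_ is the AND mask
+ * that clears the field.  The registers below are PLL (clock) registers
+ * rather than plain MMIO.
+ */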
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_TCLK_SRC_SEL(x)                     (((x) & 0x7) << 8)
+#define   G_00000D_TCLK_SRC_SEL(x)                     (((x) >> 8) & 0x7)
+#define   C_00000D_TCLK_SRC_SEL                        0xFFFFF8FF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP(x)                       (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP(x)                       (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP                          0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+
+/* PLL regs */
+#define SCLK_CNTL                                      0xd
+#define   FORCE_HDP                                    (1 << 17)
+#define CLK_PWRMGT_CNTL                                0x14
+#define   GLOBAL_PMAN_EN                               (1 << 10)
+#define   DISP_PM                                      (1 << 20)
+#define PLL_PWRMGT_CNTL                                0x15
+#define   MPLL_TURNOFF                                 (1 << 0)
+#define   SPLL_TURNOFF                                 (1 << 1)
+#define   PPLL_TURNOFF                                 (1 << 2)
+#define   P2PLL_TURNOFF                                (1 << 3)
+#define   TVPLL_TURNOFF                                (1 << 4)
+#define   MOBILE_SU                                    (1 << 16)
+#define   SU_SCLK_USE_BCLK                             (1 << 17)
+#define SCLK_CNTL2                                     0x1e
+#define   REDUCED_SPEED_SCLK_MODE                      (1 << 16)
+#define   REDUCED_SPEED_SCLK_SEL(x)                    ((x) << 17)
+#define MCLK_MISC                                      0x1f
+#define   EN_MCLK_TRISTATE_IN_SUSPEND                  (1 << 18)
+#define SCLK_MORE_CNTL                                 0x35
+#define   REDUCED_SPEED_SCLK_EN                        (1 << 16)
+#define   IO_CG_VOLTAGE_DROP                           (1 << 17)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 20)
+#define   VOLTAGE_DROP_SYNC                            (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN                                   0xd08
+#define   DISP_D3_GRPH_RST                             (1 << 18)
+#define   DISP_D3_SUBPIC_RST                           (1 << 19)
+#define   DISP_D3_OV0_RST                              (1 << 20)
+#define   DISP_D1D2_GRPH_RST                           (1 << 21)
+#define   DISP_D1D2_SUBPIC_RST                         (1 << 22)
+#define   DISP_D1D2_OV0_RST                            (1 << 23)
+#define   DISP_DVO_ENABLE_RST                          (1 << 24)
+#define   TV_ENABLE_RST                                (1 << 25)
+#define   AUTO_PWRUP_EN                                (1 << 26)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
new file mode 100644
index 0000000..c22321c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "r100d.h"
+#include "r200_reg_safe.h"
+
+#include "r100_track.h"
+
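+/* Compute the vertex stride in dwords implied by SE_VTX_FMT_0: XY is
+ * always present (2 dwords), and Z/W, blend weights, normals, point
+ * size, fog, shininess, up to eight colors and a second coordinate set
+ * are added when their format bits are set.  The result feeds
+ * track->vtx_size for the command stream checker.
+ */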
+static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
+{
+	int vtx_size, i;
+	vtx_size = 2;
+
+	if (vtx_fmt_0 & R200_VTX_Z0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
+		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
+	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N0)
+		vtx_size += 3;
+	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
+		vtx_size++;
+	for (i = 0; i < 8; i++) {
+		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
+		switch (color_size) {
+		case 0: break;
+		case 1: vtx_size++; break;
+		case 2: vtx_size += 3; break;
+		case 3: vtx_size += 4; break;
+		}
+	}
+	if (vtx_fmt_0 & R200_VTX_XY1)
+		vtx_size += 2;
+	if (vtx_fmt_0 & R200_VTX_Z1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N1)
+		vtx_size += 3;
+	return vtx_size;
+}
+
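+/* Copy num_gpu_pages pages between the given GPU addresses with the DMA
+ * packet on the GFX ring.  The transfer is split into chunks of at most
+ * 0x1FFFFF bytes, which appears to be the limit of the packet's size
+ * field, and is bracketed by WAIT_UNTIL packets (2D idle & clean
+ * before, DMA GUI idle after).  Returns the emitted fence or an ERR_PTR.
+ */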
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+				   uint64_t src_offset,
+				   uint64_t dst_offset,
+				   unsigned num_gpu_pages,
+				   struct reservation_object *resv)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_fence *fence;
+	uint32_t size;
+	uint32_t cur_size;
+	int i, num_loops;
+	int r = 0;
+
+	/* radeon pitch is /64 */
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
+	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return ERR_PTR(r);
+	}
+	/* Must wait for 2D idle & clean before DMA or hangs might happen */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (1 << 16));
+	for (i = 0; i < num_loops; i++) {
+		cur_size = size;
+		if (cur_size > 0x1FFFFF) {
+			cur_size = 0x1FFFFF;
+		}
+		size -= cur_size;
+		radeon_ring_write(ring, PACKET0(0x720, 2));
+		radeon_ring_write(ring, src_offset);
+		radeon_ring_write(ring, dst_offset);
+		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
+		src_offset += cur_size;
+		dst_offset += cur_size;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return ERR_PTR(r);
+	}
+	radeon_ring_unlock_commit(rdev, ring, false);
+	return fence;
+}
+
+
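+/* Sum the texture coordinate dword counts packed into SE_VTX_FMT_1:
+ * three bits per texture unit, six units, with encodings above 4
+ * contributing nothing.
+ */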
+static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
+{
+	int vtx_size, i, tex_size;
+	vtx_size = 0;
+	for (i = 0; i < 6; i++) {
+		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
+		if (tex_size > 4)
+			continue;
+		vtx_size += tex_size;
+	}
+	return vtx_size;
+}
+
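+/* Validate one PACKET0 register write from a userspace command stream.
+ * Buffer offsets are patched through the relocation list, tiling bits
+ * are rewritten to match the backing BO, and the interesting state
+ * (color/depth buffers, textures, vertex format) is mirrored into the
+ * r100_cs_track so later draws can be bounds checked.  Writes to any
+ * register not handled here are rejected.
+ */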
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg)
+{
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i;
+	int face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for out of
+		 * range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case R200_PP_TXOFFSET_0:
+	case R200_PP_TXOFFSET_1:
+	case R200_PP_TXOFFSET_2:
+	case R200_PP_TXOFFSET_3:
+	case R200_PP_TXOFFSET_4:
+	case R200_PP_TXOFFSET_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R200_TXO_MACRO_TILE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R200_TXO_MICRO_TILE;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_OFFSET_F1_0:
+	case R200_PP_CUBIC_OFFSET_F2_0:
+	case R200_PP_CUBIC_OFFSET_F3_0:
+	case R200_PP_CUBIC_OFFSET_F4_0:
+	case R200_PP_CUBIC_OFFSET_F5_0:
+	case R200_PP_CUBIC_OFFSET_F1_1:
+	case R200_PP_CUBIC_OFFSET_F2_1:
+	case R200_PP_CUBIC_OFFSET_F3_1:
+	case R200_PP_CUBIC_OFFSET_F4_1:
+	case R200_PP_CUBIC_OFFSET_F5_1:
+	case R200_PP_CUBIC_OFFSET_F1_2:
+	case R200_PP_CUBIC_OFFSET_F2_2:
+	case R200_PP_CUBIC_OFFSET_F3_2:
+	case R200_PP_CUBIC_OFFSET_F4_2:
+	case R200_PP_CUBIC_OFFSET_F5_2:
+	case R200_PP_CUBIC_OFFSET_F1_3:
+	case R200_PP_CUBIC_OFFSET_F2_3:
+	case R200_PP_CUBIC_OFFSET_F3_3:
+	case R200_PP_CUBIC_OFFSET_F4_3:
+	case R200_PP_CUBIC_OFFSET_F5_3:
+	case R200_PP_CUBIC_OFFSET_F1_4:
+	case R200_PP_CUBIC_OFFSET_F2_4:
+	case R200_PP_CUBIC_OFFSET_F3_4:
+	case R200_PP_CUBIC_OFFSET_F4_4:
+	case R200_PP_CUBIC_OFFSET_F5_4:
+	case R200_PP_CUBIC_OFFSET_F1_5:
+	case R200_PP_CUBIC_OFFSET_F2_5:
+	case R200_PP_CUBIC_OFFSET_F3_5:
+	case R200_PP_CUBIC_OFFSET_F4_5:
+	case R200_PP_CUBIC_OFFSET_F5_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[i].cube_info[face - 1].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
+			DRM_ERROR("No support for depth xy offset in kms\n");
+			return -EINVAL;
+		}
+
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x210c:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case R200_SE_VTX_FMT_0:
+		track->vtx_size = r200_get_vtx_size_0(idx_value);
+		break;
+	case R200_SE_VTX_FMT_1:
+		track->vtx_size += r200_get_vtx_size_1(idx_value);
+		break;
+	case R200_PP_TXSIZE_0:
+	case R200_PP_TXSIZE_1:
+	case R200_PP_TXSIZE_2:
+	case R200_PP_TXSIZE_3:
+	case R200_PP_TXSIZE_4:
+	case R200_PP_TXSIZE_5:
+		i = (reg - R200_PP_TXSIZE_0) / 32;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXPITCH_0:
+	case R200_PP_TXPITCH_1:
+	case R200_PP_TXPITCH_2:
+	case R200_PP_TXPITCH_3:
+	case R200_PP_TXPITCH_4:
+	case R200_PP_TXPITCH_5:
+		i = (reg - R200_PP_TXPITCH_0) / 32;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFILTER_0:
+	case R200_PP_TXFILTER_1:
+	case R200_PP_TXFILTER_2:
+	case R200_PP_TXFILTER_3:
+	case R200_PP_TXFILTER_4:
+	case R200_PP_TXFILTER_5:
+		i = (reg - R200_PP_TXFILTER_0) / 32;
+		track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
+						 >> R200_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXMULTI_CTL_0:
+	case R200_PP_TXMULTI_CTL_1:
+	case R200_PP_TXMULTI_CTL_2:
+	case R200_PP_TXMULTI_CTL_3:
+	case R200_PP_TXMULTI_CTL_4:
+	case R200_PP_TXMULTI_CTL_5:
+		i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
+		break;
+	case R200_PP_TXFORMAT_X_0:
+	case R200_PP_TXFORMAT_X_1:
+	case R200_PP_TXFORMAT_X_2:
+	case R200_PP_TXFORMAT_X_3:
+	case R200_PP_TXFORMAT_X_4:
+	case R200_PP_TXFORMAT_X_5:
+		i = (reg - R200_PP_TXFORMAT_X_0) / 32;
+		track->textures[i].txdepth = idx_value & 0x7;
+		tmp = (idx_value >> 16) & 0x3;
+		/* 2D, 3D, CUBE */
+		switch (tmp) {
+		case 0:
+		case 3:
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+			/* 1D/2D */
+			track->textures[i].tex_coord_type = 0;
+			break;
+		case 1:
+			/* CUBE */
+			track->textures[i].tex_coord_type = 2;
+			break;
+		case 2:
+			/* 3D */
+			track->textures[i].tex_coord_type = 1;
+			break;
+		}
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFORMAT_0:
+	case R200_PP_TXFORMAT_1:
+	case R200_PP_TXFORMAT_2:
+	case R200_PP_TXFORMAT_3:
+	case R200_PP_TXFORMAT_4:
+	case R200_PP_TXFORMAT_5:
+		i = (reg - R200_PP_TXFORMAT_0) / 32;
+		if (idx_value & R200_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+			track->textures[i].lookup_disable = true;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case R200_TXFORMAT_I8:
+		case R200_TXFORMAT_RGB332:
+		case R200_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_AI88:
+		case R200_TXFORMAT_ARGB1555:
+		case R200_TXFORMAT_RGB565:
+		case R200_TXFORMAT_ARGB4444:
+		case R200_TXFORMAT_VYUY422:
+		case R200_TXFORMAT_YVYU422:
+		case R200_TXFORMAT_LDVDU655:
+		case R200_TXFORMAT_DVDU88:
+		case R200_TXFORMAT_AVYU4444:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_ARGB8888:
+		case R200_TXFORMAT_RGBA8888:
+		case R200_TXFORMAT_ABGR8888:
+		case R200_TXFORMAT_BGR111110:
+		case R200_TXFORMAT_LDVDU8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R200_TXFORMAT_DXT23:
+		case R200_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_FACES_0:
+	case R200_PP_CUBIC_FACES_1:
+	case R200_PP_CUBIC_FACES_2:
+	case R200_PP_CUBIC_FACES_3:
+	case R200_PP_CUBIC_FACES_4:
+	case R200_PP_CUBIC_FACES_5:
+		tmp = idx_value;
+		i = (reg - R200_PP_CUBIC_FACES_0) / 32;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void r200_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
+	rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
+}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 0000000..21161aa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1593 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "r100_track.h"
+#include "r300d.h"
+#include "rv350d.h"
+#include "r300_reg_safe.h"
+
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to
+ *   HOST_PATH_CNTL done over MMIO to flush the host path read cache;
+ *   this leads to a HARDLOCKUP.  However, scheduling such a write on
+ *   the ring seems harmless; I suspect the CP read collides with the
+ *   flush somehow, or maybe the MC, hard to tell. (Jerome Glisse)
+ */
+
+/*
+ * Indirect register accessors
+ */
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t r;
+
+	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	r = RREG32(RADEON_PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
+	return r;
+}
+
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	WREG32(RADEON_PCIE_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
+}
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	int i;
+
+	/* Workaround for a HW bug: flush twice */
+	for (i = 0; i < 2; i++) {
+		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	}
+	mb();
+}
+
+#define R300_PTE_UNSNOOPED (1 << 0)
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
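+/* Build a PCIE GART page table entry: the bus address is stored as
+ * addr >> 8 in the low 24 bits with address bits 32..39 in the top
+ * byte, and the low bits flag the page readable, writeable or
+ * unsnooped.
+ */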
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	addr = (lower_32_bits(addr) >> 8) |
+		((upper_32_bits(addr) & 0xff) << 24);
+	if (flags & RADEON_GART_PAGE_READ)
+		addr |= R300_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		addr |= R300_PTE_WRITEABLE;
+	if (!(flags & RADEON_GART_PAGE_SNOOP))
+		addr |= R300_PTE_UNSNOOPED;
+	return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+			      uint64_t entry)
+{
+	void __iomem *ptr = rdev->gart.ptr;
+
+	/* On x86 we want this to be CPU endian; on powerpc without HW
+	 * swappers it'll get swapped on the way into VRAM, so there is
+	 * no need for cpu_to_le32 on VRAM tables. */
+	writel(entry, ((void __iomem *)ptr) + (i * 4));
+}
+
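+/* One-time PCIE GART setup: allocate the common GART structure,
+ * register the debugfs info file, and point the asic gart hooks at the
+ * rv370 variants before allocating the page table in VRAM.
+ */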
+int rv370_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "RV370 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	r = rv370_debugfs_pcie_gart_info_init(rdev);
+	if (r)
+		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
+	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t table_addr;
+	uint32_t tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* discard memory requests outside of the configured range */
+	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	table_addr = rdev->gart.table_addr;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+	/* FIXME: setup default page */
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+	/* Clear error */
+	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_EN;
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	rv370_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv370_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
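+/* Emit a fence on the GFX ring: flush the 3D destination and Z caches,
+ * wait for the engines to go idle & clean, invalidate the HDP read
+ * cache through HOST_PATH_CNTL scheduled on the ring (see the errata
+ * note at the top of this file), then write the sequence number to the
+ * fence scratch register and fire the software interrupt.
+ */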
+void r300_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* Whoever calls radeon_fence_emit should have locked the ring and
+	 * asked for enough space (today the callers are IB scheduling and
+	 * buffer moves) */
+	/* Write SC register so SC & US assert idle */
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+	radeon_ring_write(ring, 0);
+	/* Flush 3D cache */
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	unsigned gb_tile_config;
+	int r;
+
+	/* Sub-pixel precision of 1/12 so we can have 4K rendering,
+	 * according to the docs */
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch(rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	case 1:
+	default:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+	radeon_ring_write(ring, gb_tile_config);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X0_SHIFT) |
+			   (6 << R300_MS_Y0_SHIFT) |
+			   (6 << R300_MS_X1_SHIFT) |
+			   (6 << R300_MS_Y1_SHIFT) |
+			   (6 << R300_MS_X2_SHIFT) |
+			   (6 << R300_MS_Y2_SHIFT) |
+			   (6 << R300_MSBD0_Y_SHIFT) |
+			   (6 << R300_MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X3_SHIFT) |
+			   (6 << R300_MS_Y3_SHIFT) |
+			   (6 << R300_MS_X4_SHIFT) |
+			   (6 << R300_MS_Y4_SHIFT) |
+			   (6 << R300_MS_X5_SHIFT) |
+			   (6 << R300_MS_Y5_SHIFT) |
+			   (6 << R300_MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_GEOMETRY_ROUND_NEAREST |
+			  R300_COLOR_ROUND_NEAREST);
+	radeon_ring_unlock_commit(rdev, ring, false);
+}
+
+static void r300_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_R300 &&
+	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+	}
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r300_gpu_init(struct radeon_device *rdev)
+{
+	uint32_t gb_tile_config, tmp;
+
+	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
+		/* r300,r350 */
+		rdev->num_gb_pipes = 2;
+	} else {
+		/* rv350,rv370,rv380,r300 AD, r350 AH */
+		rdev->num_gb_pipes = 1;
+	}
+	rdev->num_z_pipes = 1;
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch (rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	default:
+	case 1:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+	if (r300_mc_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
+	}
+	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
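+/* Soft reset the GPU: stop the CP and bus mastering, pulse a VAP/GA
+ * soft reset and then a CP soft reset (each with a 500 ms settle
+ * delay), restore PCI state, and report failure if RBBM_STATUS still
+ * shows GA or VAP busy afterwards.
+ */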
+int r300_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* Resetting the CP seems to be problematic: sometimes it ends up
+	 * hard-locking the computer, but it's necessary for a successful
+	 * reset.  More testing & playing is needed on R3XX/R4XX to find a
+	 * reliable solution (if any).
+	 */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+	u32 tmp;
+
+	/* DDR for all cards after R300 & IGPs */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RADEON_MEM_CNTL);
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default:  rdev->mc.vram_width = 128; break;
+	}
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
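+/* Program the PCIE link width.  The requested lane count is written to
+ * the indirect LC_LINK_WIDTH_CNTL register and latched with
+ * RECONFIG_NOW; if the link is already at that width this is a no-op.
+ * The final loop spins while the register reads back as all ones,
+ * i.e. while the link is not yet responding again.
+ */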
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	uint32_t link_width_cntl, mask;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     RADEON_PCIE_LC_RECONFIG_LATER |
+			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+	link_width_cntl |= mask;
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						     RADEON_PCIE_LC_RECONFIG_NOW));
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
+
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
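+/* r300 counterpart of the PACKET0 checker: patch relocations for
+ * color/depth/texture/AA resolve offsets, rewrite tiling bits to match
+ * the backing BO, mirror state into the r100_cs_track, and refuse
+ * hyperz and CMASK registers unless the file that owns the feature is
+ * the one submitting.  Anything unrecognized takes the fail label.
+ */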
+static int r300_packet0_check(struct radeon_cs_parser *p,
+		struct radeon_cs_packet *pkt,
+		unsigned idx, unsigned reg)
+{
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp, tile_flags = 0;
+	unsigned i;
+	int r;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch(reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_RB3D_COLOROFFSET1:
+	case R300_RB3D_COLOROFFSET2:
+	case R300_RB3D_COLOROFFSET3:
+		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[i].robj = reloc->robj;
+		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case R300_ZB_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+	case R300_TX_OFFSET_0+8:
+	case R300_TX_OFFSET_0+12:
+	case R300_TX_OFFSET_0+16:
+	case R300_TX_OFFSET_0+20:
+	case R300_TX_OFFSET_0+24:
+	case R300_TX_OFFSET_0+28:
+	case R300_TX_OFFSET_0+32:
+	case R300_TX_OFFSET_0+36:
+	case R300_TX_OFFSET_0+40:
+	case R300_TX_OFFSET_0+44:
+	case R300_TX_OFFSET_0+48:
+	case R300_TX_OFFSET_0+52:
+	case R300_TX_OFFSET_0+56:
+	case R300_TX_OFFSET_0+60:
+		i = (reg - R300_TX_OFFSET_0) >> 2;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
+			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
+		} else {
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_TXO_MACRO_TILE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_TXO_MICRO_TILE;
+			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+			tmp = idx_value + ((u32)reloc->gpu_offset);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	/* Tracked registers */
+	case 0x2084:
+		/* VAP_VF_CNTL */
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x20B4:
+		/* VAP_VTX_SIZE */
+		track->vtx_size = idx_value & 0x7F;
+		break;
+	case 0x2134:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case 0x2088:
+		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
+		if (p->rdev->family < CHIP_RV515)
+			goto fail;
+		track->vap_alt_nverts = idx_value & 0xFFFFFF;
+		break;
+	case 0x43E4:
+		/* SC_SCISSOR1 */
+		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
+		if (p->rdev->family < CHIP_RV515) {
+			track->maxy -= 1440;
+		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case 0x4E00:
+		/* RB3D_CCTL */
+		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+		    p->rdev->cmask_filp != p->filp) {
+			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+			return -EINVAL;
+		}
+		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
+		break;
+	case 0x4E38:
+	case 0x4E3C:
+	case 0x4E40:
+	case 0x4E44:
+		/* RB3D_COLORPITCH0 */
+		/* RB3D_COLORPITCH1 */
+		/* RB3D_COLORPITCH2 */
+		/* RB3D_COLORPITCH3 */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				radeon_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_COLOR_TILE_ENABLE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		i = (reg - 0x4E38) >> 2;
+		track->cb[i].pitch = idx_value & 0x3FFE;
+		switch (((idx_value >> 21) & 0xF)) {
+		case 9:
+		case 11:
+		case 12:
+			track->cb[i].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 13:
+		case 15:
+			track->cb[i].cpp = 2;
+			break;
+		case 5:
+			if (p->rdev->family < CHIP_RV515) {
+				DRM_ERROR("Invalid color buffer format (%d)!\n",
+					  ((idx_value >> 21) & 0xF));
+				return -EINVAL;
+			}
+			/* Pass through. */
+		case 6:
+			track->cb[i].cpp = 4;
+			break;
+		case 10:
+			track->cb[i].cpp = 8;
+			break;
+		case 7:
+			track->cb[i].cpp = 16;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> 21) & 0xF));
+			return -EINVAL;
+		}
+		track->cb_dirty = true;
+		break;
+	case 0x4F00:
+		/* ZB_CNTL */
+		if (idx_value & 2) {
+			track->z_enabled = true;
+		} else {
+			track->z_enabled = false;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F10:
+		/* ZB_FORMAT */
+		switch ((idx_value & 0xF)) {
+		case 0:
+		case 1:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+			track->zb.cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid z buffer format (%d) !\n",
+				  (idx_value & 0xF));
+			return -EINVAL;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F24:
+		/* ZB_DEPTHPITCH */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				radeon_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_DEPTHMICROTILE_TILED;
+			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
+		break;
+	case 0x4104:
+		/* TX_ENABLE */
+		for (i = 0; i < 16; i++) {
+			bool enabled;
+
+			enabled = !!(idx_value & (1 << i));
+			track->textures[i].enabled = enabled;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x44C0:
+	case 0x44C4:
+	case 0x44C8:
+	case 0x44CC:
+	case 0x44D0:
+	case 0x44D4:
+	case 0x44D8:
+	case 0x44DC:
+	case 0x44E0:
+	case 0x44E4:
+	case 0x44E8:
+	case 0x44EC:
+	case 0x44F0:
+	case 0x44F4:
+	case 0x44F8:
+	case 0x44FC:
+		/* TX_FORMAT1_[0-15] */
+		i = (reg - 0x44C0) >> 2;
+		tmp = (idx_value >> 25) & 0x3;
+		track->textures[i].tex_coord_type = tmp;
+		switch ((idx_value & 0x1F)) {
+		case R300_TX_FORMAT_X8:
+		case R300_TX_FORMAT_Y4X4:
+		case R300_TX_FORMAT_Z3Y3X2:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
+		case R300_TX_FORMAT_Y8X8:
+		case R300_TX_FORMAT_Z5Y6X5:
+		case R300_TX_FORMAT_Z6Y5X5:
+		case R300_TX_FORMAT_W4Z4Y4X4:
+		case R300_TX_FORMAT_W1Z5Y5X5:
+		case R300_TX_FORMAT_D3DMFT_CxV8U8:
+		case R300_TX_FORMAT_B8G8_B8G8:
+		case R300_TX_FORMAT_G8R8_G8B8:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
+		case R300_TX_FORMAT_Z11Y11X10:
+		case R300_TX_FORMAT_Z10Y11X11:
+		case R300_TX_FORMAT_W8Z8Y8X8:
+		case R300_TX_FORMAT_W2Z10Y10X10:
+		case 0x17:
+		case R300_TX_FORMAT_FL_I32:
+		case 0x1e:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_W16Z16Y16X16:
+		case R300_TX_FORMAT_FL_R16G16B16A16:
+		case R300_TX_FORMAT_FL_I32A32:
+			track->textures[i].cpp = 8;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_FL_R32G32B32A32:
+			track->textures[i].cpp = 16;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R300_TX_FORMAT_ATI2N:
+			if (p->rdev->family < CHIP_R420) {
+				DRM_ERROR("Invalid texture format %u\n",
+					  (idx_value & 0x1F));
+				return -EINVAL;
+			}
+			/* The same rules apply as for DXT3/5. */
+			/* Pass through. */
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		default:
+			DRM_ERROR("Invalid texture format %u\n",
+				  (idx_value & 0x1F));
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4400:
+	case 0x4404:
+	case 0x4408:
+	case 0x440C:
+	case 0x4410:
+	case 0x4414:
+	case 0x4418:
+	case 0x441C:
+	case 0x4420:
+	case 0x4424:
+	case 0x4428:
+	case 0x442C:
+	case 0x4430:
+	case 0x4434:
+	case 0x4438:
+	case 0x443C:
+		/* TX_FILTER0_[0-15] */
+		i = (reg - 0x4400) >> 2;
+		tmp = idx_value & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_w = false;
+		}
+		tmp = (idx_value >> 3) & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_h = false;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4500:
+	case 0x4504:
+	case 0x4508:
+	case 0x450C:
+	case 0x4510:
+	case 0x4514:
+	case 0x4518:
+	case 0x451C:
+	case 0x4520:
+	case 0x4524:
+	case 0x4528:
+	case 0x452C:
+	case 0x4530:
+	case 0x4534:
+	case 0x4538:
+	case 0x453C:
+		/* TX_FORMAT2_[0-15] */
+		i = (reg - 0x4500) >> 2;
+		tmp = idx_value & 0x3FFF;
+		track->textures[i].pitch = tmp + 1;
+		if (p->rdev->family >= CHIP_RV515) {
+			tmp = ((idx_value >> 15) & 1) << 11;
+			track->textures[i].width_11 = tmp;
+			tmp = ((idx_value >> 16) & 1) << 11;
+			track->textures[i].height_11 = tmp;
+
+			/* ATI1N */
+			if (idx_value & (1 << 14)) {
+				/* The same rules apply as for DXT1. */
+				track->textures[i].compress_format =
+					R100_TRACK_COMP_DXT1;
+			}
+		} else if (idx_value & (1 << 14)) {
+			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4480:
+	case 0x4484:
+	case 0x4488:
+	case 0x448C:
+	case 0x4490:
+	case 0x4494:
+	case 0x4498:
+	case 0x449C:
+	case 0x44A0:
+	case 0x44A4:
+	case 0x44A8:
+	case 0x44AC:
+	case 0x44B0:
+	case 0x44B4:
+	case 0x44B8:
+	case 0x44BC:
+		/* TX_FORMAT0_[0-15] */
+		i = (reg - 0x4480) >> 2;
+		tmp = idx_value & 0x7FF;
+		track->textures[i].width = tmp + 1;
+		tmp = (idx_value >> 11) & 0x7FF;
+		track->textures[i].height = tmp + 1;
+		tmp = (idx_value >> 26) & 0xF;
+		track->textures[i].num_levels = tmp;
+		tmp = idx_value & (1 << 31);
+		track->textures[i].use_pitch = !!tmp;
+		tmp = (idx_value >> 22) & 0xF;
+		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
+		break;
+	case R300_ZB_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case 0x4e0c:
+		/* RB3D_COLOR_CHANNEL_MASK */
+		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
+		break;
+	case 0x43a4:
+		/* SC_HYPERZ_EN */
+		/* r300c emits this register - we need to disable hyperz for it
+		 * without complaining */
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & 0x1)
+				ib[idx] = idx_value & ~1;
+		}
+		break;
+	case 0x4f1c:
+		/* ZB_BW_CNTL */
+		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & (R300_HIZ_ENABLE |
+					 R300_RD_COMP_ENABLE |
+					 R300_WR_COMP_ENABLE |
+					 R300_FAST_FILL_ENABLE))
+				goto fail;
+		}
+		break;
+	case 0x4e04:
+		/* RB3D_BLENDCNTL */
+		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
+		break;
+	case 0x4f30: /* ZB_MASK_OFFSET */
+	case 0x4f34: /* ZB_ZMASK_PITCH */
+	case 0x4f44: /* ZB_HIZ_OFFSET */
+	case 0x4f54: /* ZB_HIZ_PITCH */
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		break;
+	case 0x4028:
+		/* GB_Z_PEQ_CONFIG */
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		if (p->rdev->family >= CHIP_RV350)
+			break;
+		goto fail;
+	case 0x4be8:
+		/* valid register only on RV530 */
+		if (p->rdev->family == CHIP_RV530)
+			break;
+		/* fallthrough do not move */
+	default:
+		goto fail;
+	}
+	return 0;
+fail:
+	pr_err("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+	       reg, idx, idx_value);
+	return -EINVAL;
+}
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_bo_list *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	unsigned idx;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch (pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	/* Draw packet */
+	case PACKET3_3D_DRAW_IMMD:
+		/* Number of dwords is vtx_size * (num_vertices - 1)
+		 * PRIM_WALK must be equal to 3: vertex data is embedded
+		 * in the command stream */
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_IMMD_2:
+		/* Number of dwords is vtx_size * (num_vertices - 1)
+		 * PRIM_WALK must be equal to 3: vertex data is embedded
+		 * in the command stream */
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_3D_CLEAR_CMASK:
+		if (p->rdev->cmask_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (track == NULL)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = r100_cs_parse_packet0(p, &pkt,
+						  p->rdev->config.r300.reg_safe_bm,
+						  p->rdev->config.r300.reg_safe_bm_size,
+						  &r300_packet0_check);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r300_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			return -EINVAL;
+		}
+		if (r) {
+			return r;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+	return 0;
+}
+
+void r300_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
+}
+
+void r300_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+	}
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32(R_00015C_AGP_BASE_2,
+			upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r300_mc_wait_for_idle(rdev))
+		DRM_INFO("Failed to wait MC idle before programming MC.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force some of the blocks on */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+		tmp |= S_00000D_FORCE_VAP(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r300_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM, but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350 ||
+	    rdev->family == CHIP_RV350)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure GART is not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Set asic errata */
+	r300_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r300_set_reg_safe(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
new file mode 100644
index 0000000..00c0d2b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -0,0 +1,1789 @@
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ *          Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
+
+#define R300_SURF_TILE_MACRO (1<<16)
+#define R300_SURF_TILE_MICRO (2<<16)
+#define R300_SURF_TILE_BOTH (3<<16)
+
+
+#define R300_MC_INIT_MISC_LAT_TIMER	0x180
+#	define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT	28
+
+#define R300_MC_INIT_GFX_LAT_TIMER	0x154
+#	define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE                0x1D98
+#define R300_SE_VPORT_XOFFSET               0x1D9C
+#define R300_SE_VPORT_YSCALE                0x1DA0
+#define R300_SE_VPORT_YOFFSET               0x1DA4
+#define R300_SE_VPORT_ZSCALE                0x1DA8
+#define R300_SE_VPORT_ZOFFSET               0x1DAC
+
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code by Christoph Brill (it's a guess!)
+ */
+#define R300_VAP_CNTL	0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL	0x2084
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
+
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
+	/* State based - direct writes to registers trigger vertex
+           generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
+
+	/* I don't think I saw these three used. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
+
+	/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
+	/* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
+#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END: Wild guesses */
+
+#define R300_SE_VTE_CNTL                  0x20b0
+#	define     R300_VPORT_X_SCALE_ENA                0x00000001
+#	define     R300_VPORT_X_OFFSET_ENA               0x00000002
+#	define     R300_VPORT_Y_SCALE_ENA                0x00000004
+#	define     R300_VPORT_Y_OFFSET_ENA               0x00000008
+#	define     R300_VPORT_Z_SCALE_ENA                0x00000010
+#	define     R300_VPORT_Z_OFFSET_ENA               0x00000020
+#	define     R300_VTX_XY_FMT                       0x00000100
+#	define     R300_VTX_Z_FMT                        0x00000200
+#	define     R300_VTX_W0_FMT                       0x00000400
+#	define     R300_VTX_W0_NORMALIZE                 0x00000800
+#	define     R300_VTX_ST_DENORMALIZED              0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS              0x2140
+#	define R300_VC_NO_SWAP                  (0 << 0)
+#	define R300_VC_16BIT_SWAP               (1 << 0)
+#	define R300_VC_32BIT_SWAP               (2 << 0)
+#	define R300_VAP_TCL_BYPASS		(1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from vertex
+ * arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0            0x2150
+#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1            0x2154
+#define R300_VAP_INPUT_ROUTE_0_2            0x2158
+#define R300_VAP_INPUT_ROUTE_0_3            0x215C
+#define R300_VAP_INPUT_ROUTE_0_4            0x2160
+#define R300_VAP_INPUT_ROUTE_0_5            0x2164
+#define R300_VAP_INPUT_ROUTE_0_6            0x2168
+#define R300_VAP_INPUT_ROUTE_0_7            0x216C
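+
+/* Editorial sketch, not part of the original header: one plausible way to
+ * compose an INPUT_ROUTE_0 word from the fields above. The helper name and
+ * the u32 type (from <linux/types.h>) are assumptions for illustration;
+ * several of the source fields are themselves marked GUESS.
+ */
+static inline u32 r300_input_route0_word(u32 dst_reg, int last)
+{
+	/* Four float components routed into vertex input register dst_reg;
+	 * INPUT_ROUTE_END marks the final word of the route list.
+	 */
+	u32 word = R300_INPUT_ROUTE_COMPONENTS_4 | R300_INPUT_ROUTE_FLOAT |
+		   ((dst_reg << R300_VAP_INPUT_ROUTE_IDX_SHIFT) &
+		    R300_VAP_INPUT_ROUTE_IDX_MASK);
+
+	if (last)
+		word |= R300_VAP_INPUT_ROUTE_END;
+	return word;
+}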
+
+/* gap */
+
+/* Notes:
+ *  - always set up to produce at least two attributes:
+ *    if vertex program uses only position, fglrx will set normal, too
+ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_INPUT_CNTL_0               0x2180
+#       define R300_INPUT_CNTL_0_COLOR           0x00000001
+#define R300_VAP_INPUT_CNTL_1               0x2184
+#       define R300_INPUT_CNTL_POS               0x00000001
+#       define R300_INPUT_CNTL_NORMAL            0x00000002
+#       define R300_INPUT_CNTL_COLOR             0x00000004
+#       define R300_INPUT_CNTL_TC0               0x00000400
+#       define R300_INPUT_CNTL_TC1               0x00000800
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
+#       define R300_INPUT_ROUTE_SELECT_X    0
+#       define R300_INPUT_ROUTE_SELECT_Y    1
+#       define R300_INPUT_ROUTE_SELECT_Z    2
+#       define R300_INPUT_ROUTE_SELECT_W    3
+#       define R300_INPUT_ROUTE_SELECT_ZERO 4
+#       define R300_INPUT_ROUTE_SELECT_ONE  5
+#       define R300_INPUT_ROUTE_SELECT_MASK 7
+#       define R300_INPUT_ROUTE_X_SHIFT     0
+#       define R300_INPUT_ROUTE_Y_SHIFT     3
+#       define R300_INPUT_ROUTE_Z_SHIFT     6
+#       define R300_INPUT_ROUTE_W_SHIFT     9
+#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
+#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
+#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
+#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
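+
+/* Editorial sketch, not part of the original header: the upload sequence
+ * described above, assuming radeon.h's WREG32() accessor and struct
+ * radeon_device are in scope. "slot" is the instruction index; each
+ * instruction is four dwords (one 16-byte unit).
+ */
+static inline void r300_upload_vp_insn(struct radeon_device *rdev,
+				       u32 slot, const u32 insn[4])
+{
+	int i;
+
+	/* Point the upload address at the instruction area, then stream
+	 * the four dwords of one instruction into UPLOAD_DATA.
+	 */
+	WREG32(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM + slot);
+	for (i = 0; i < 4; i++)
+		WREG32(R300_VAP_PVS_UPLOAD_DATA, insn[i]);
+}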
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C               0x221C
+#       define R300_221C_NORMAL                  0x00000000
+#       define R300_221C_CLEAR                   0x0001C000
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0                   0x2220
+#define R300_VAP_CLIP_X_1                   0x2224
+#define R300_VAP_CLIP_Y_0                   0x2228
+#define R300_VAP_CLIP_Y_1                   0x2230
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288               0x2288
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
+ * write to position takes place.
+ *
+ * Most likely this is used to ignore the rest of the program in cases
+ * where a group of verts isn't visible. For some reason this "section"
+ * sometimes also accepts instructions that have no relationship with
+ * position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1                 0x22D0
+#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
+#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
+#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2                 0x22D4
+#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
+#define R300_VAP_PVS_CNTL_3	           0x22D8
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices
+ */
+#define R300_VAP_VTX_COLOR_R                0x2464
+#define R300_VAP_VTX_COLOR_G                0x2468
+#define R300_VAP_VTX_COLOR_B                0x246C
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1              0x2494
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2              0x24A4
+#define R300_VAP_VTX_POS_0_Z_2              0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT             0x24AC
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT	(1<<2)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT	(1<<3)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT	(1<<4)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE	(0xf<<5)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT	(0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1	0x4004
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT	0
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT	3
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT	6
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT	9
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT	12
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT	15
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT	18
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
+
+/* UNK31 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal at the time being due to shortcomings on
+ * the pixel shader side.
+ */
+#define R300_GB_ENABLE	0x4008
+#	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
+#	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
+#	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
+#	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
+#	define R300_GB_UNK31			(1<<31)
+	/* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE	0
+#define R300_GB_TEX_ST		1
+#define R300_GB_TEX_STR		2
+#	define R300_GB_TEX0_SOURCE_SHIFT	16
+#	define R300_GB_TEX1_SOURCE_SHIFT	18
+#	define R300_GB_TEX2_SOURCE_SHIFT	20
+#	define R300_GB_TEX3_SOURCE_SHIFT	22
+#	define R300_GB_TEX4_SOURCE_SHIFT	24
+#	define R300_GB_TEX5_SOURCE_SHIFT	26
+#	define R300_GB_TEX6_SOURCE_SHIFT	28
+#	define R300_GB_TEX7_SOURCE_SHIFT	30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0	0x4010
+	/* shifts - each of the fields is 4 bits */
+#	define R300_GB_MSPOS0__MS_X0_SHIFT	0
+#	define R300_GB_MSPOS0__MS_Y0_SHIFT	4
+#	define R300_GB_MSPOS0__MS_X1_SHIFT	8
+#	define R300_GB_MSPOS0__MS_Y1_SHIFT	12
+#	define R300_GB_MSPOS0__MS_X2_SHIFT	16
+#	define R300_GB_MSPOS0__MS_Y2_SHIFT	20
+#	define R300_GB_MSPOS0__MSBD0_Y		24
+#	define R300_GB_MSPOS0__MSBD0_X		28
+
+#define R300_GB_MSPOS1	0x4014
+#	define R300_GB_MSPOS1__MS_X3_SHIFT	0
+#	define R300_GB_MSPOS1__MS_Y3_SHIFT	4
+#	define R300_GB_MSPOS1__MS_X4_SHIFT	8
+#	define R300_GB_MSPOS1__MS_Y4_SHIFT	12
+#	define R300_GB_MSPOS1__MS_X5_SHIFT	16
+#	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
+#	define R300_GB_MSPOS1__MSBD1		24
+
+
+#define R300_GB_TILE_CONFIG	0x4018
+#	define R300_GB_TILE_ENABLE	(1<<0)
+#	define R300_GB_TILE_PIPE_COUNT_RV300	0
+#	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
+#	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_PIPE_COUNT_RV410	(3<<1)
+#	define R300_GB_TILE_SIZE_8		0
+#	define R300_GB_TILE_SIZE_16		(1<<4)
+#	define R300_GB_TILE_SIZE_32		(2<<4)
+#	define R300_GB_SUPER_SIZE_1		(0<<6)
+#	define R300_GB_SUPER_SIZE_2		(1<<6)
+#	define R300_GB_SUPER_SIZE_4		(2<<6)
+#	define R300_GB_SUPER_SIZE_8		(3<<6)
+#	define R300_GB_SUPER_SIZE_16		(4<<6)
+#	define R300_GB_SUPER_SIZE_32		(5<<6)
+#	define R300_GB_SUPER_SIZE_64		(6<<6)
+#	define R300_GB_SUPER_SIZE_128		(7<<6)
+#	define R300_GB_SUPER_X_SHIFT		9	/* 3 bits wide */
+#	define R300_GB_SUPER_Y_SHIFT		12	/* 3 bits wide */
+#	define R300_GB_SUPER_TILE_A		0
+#	define R300_GB_SUPER_TILE_B		(1<<15)
+#	define R300_GB_SUBPIXEL_1_12		0
+#	define R300_GB_SUBPIXEL_1_16		(1<<16)
+
+#define R300_GB_FIFO_SIZE	0x4024
+	/* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32	0
+#define R300_GB_FIFO_SIZE_64	1
+#define R300_GB_FIFO_SIZE_128	2
+#define R300_GB_FIFO_SIZE_256	3
+#	define R300_SC_IFIFO_SIZE_SHIFT	0
+#	define R300_SC_TZFIFO_SIZE_SHIFT	2
+#	define R300_SC_BFIFO_SIZE_SHIFT	4
+
+#	define R300_US_OFIFO_SIZE_SHIFT	12
+#	define R300_US_WFIFO_SIZE_SHIFT	14
+	/* the following use the same constants as above, but the meaning
+	   is times 2 (i.e. instead of 32 words it means 64) */
+#	define R300_RS_TFIFO_SIZE_SHIFT	6
+#	define R300_RS_CFIFO_SIZE_SHIFT	8
+#	define R300_US_RAM_SIZE_SHIFT		10
+	/* watermarks, 3 bits wide */
+#	define R300_RS_HIGHWATER_COL_SHIFT	16
+#	define R300_RS_HIGHWATER_TEX_SHIFT	19
+#	define R300_OFIFO_HIGHWATER_SHIFT	22	/* two bits only */
+#	define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT	24
+
+#define R300_GB_SELECT	0x401C
+#	define R300_GB_FOG_SELECT_C0A		0
+#	define R300_GB_FOG_SELECT_C1A		1
+#	define R300_GB_FOG_SELECT_C2A		2
+#	define R300_GB_FOG_SELECT_C3A		3
+#	define R300_GB_FOG_SELECT_1_1_W	4
+#	define R300_GB_FOG_SELECT_Z		5
+#	define R300_GB_DEPTH_SELECT_Z		0
+#	define R300_GB_DEPTH_SELECT_1_1_W	(1<<3)
+#	define R300_GB_W_SELECT_1_W		0
+#	define R300_GB_W_SELECT_1		(1<<4)
+
+#define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_DISABLE			0x00
+#	define R300_AA_ENABLE			0x01
+#	define R300_AA_SUBSAMPLES_2		0
+#	define R300_AA_SUBSAMPLES_3		(1<<1)
+#	define R300_AA_SUBSAMPLES_4		(2<<1)
+#	define R300_AA_SUBSAMPLES_6		(3<<1)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_INVALTAGS                   0x4100
+#define R300_TX_FLUSH                       0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE                      0x4104
+#       define R300_TX_ENABLE_0                  (1 << 0)
+#       define R300_TX_ENABLE_1                  (1 << 1)
+#       define R300_TX_ENABLE_2                  (1 << 2)
+#       define R300_TX_ENABLE_3                  (1 << 3)
+#       define R300_TX_ENABLE_4                  (1 << 4)
+#       define R300_TX_ENABLE_5                  (1 << 5)
+#       define R300_TX_ENABLE_6                  (1 << 6)
+#       define R300_TX_ENABLE_7                  (1 << 7)
+#       define R300_TX_ENABLE_8                  (1 << 8)
+#       define R300_TX_ENABLE_9                  (1 << 9)
+#       define R300_TX_ENABLE_10                 (1 << 10)
+#       define R300_TX_ENABLE_11                 (1 << 11)
+#       define R300_TX_ENABLE_12                 (1 << 12)
+#       define R300_TX_ENABLE_13                 (1 << 13)
+#       define R300_TX_ENABLE_14                 (1 << 14)
+#       define R300_TX_ENABLE_15                 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE                   0x421C
+#       define R300_POINTSIZE_Y_SHIFT            0
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_POINTSIZE_X_SHIFT            16
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
+#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
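+
+/* Editorial sketch, not part of the original header: encoding a point size
+ * given in pixels (sizes are stored in multiples of 6, as noted above).
+ * The helper name is an assumption for illustration.
+ */
+static inline u32 r300_pointsize(u32 pixels)
+{
+	u32 units = pixels * 6;	/* e.g. 16 pixels -> 96 */
+
+	return ((units << R300_POINTSIZE_X_SHIFT) & R300_POINTSIZE_X_MASK) |
+	       ((units << R300_POINTSIZE_Y_SHIFT) & R300_POINTSIZE_Y_MASK);
+}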
+
+/* The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT                      0x4234
+#       define R300_LINESIZE_SHIFT            0
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
+#       define R300_LINE_CNT_HO               (1 << 16)
+#       define R300_LINE_CNT_VE               (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238                       0x4238
+
+/* Something shade related */
+#define R300_RE_SHADE                         0x4274
+
+#define R300_RE_SHADE_MODEL                   0x4278
+#	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
+#	define R300_RE_SHADE_MODEL_FLAT       0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE                  0x4288
+#	define R300_PM_ENABLED                (1 << 0)
+#	define R300_PM_FRONT_POINT            (0 << 0)
+#	define R300_PM_BACK_POINT             (0 << 0)
+#	define R300_PM_FRONT_LINE             (1 << 4)
+#	define R300_PM_FRONT_FILL             (1 << 5)
+#	define R300_PM_BACK_LINE              (1 << 7)
+#	define R300_PM_BACK_FILL              (1 << 8)
+
+/* Fog parameters */
+#define R300_RE_FOG_SCALE                     0x4294
+#define R300_RE_FOG_START                     0x4298
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
+#define R300_RE_ZBIAS_T_FACTOR                0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
+#define R300_RE_ZBIAS_W_FACTOR                0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT).
+ *  One to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL		    0x42B4
+#	define R300_OCCLUSION_ON		(1<<1)
+
+#define R300_RE_CULL_CNTL                   0x42B8
+#       define R300_CULL_FRONT                   (1 << 0)
+#       define R300_CULL_BACK                    (1 << 1)
+#       define R300_FRONT_FACE_CCW               (0 << 2)
+#       define R300_FRONT_FACE_CW                (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_CNTL_0                      0x4300
+#       define R300_RS_CNTL_TC_CNT_SHIFT         2
+#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
+	/* number of color interpolators used */
+#	define R300_RS_CNTL_CI_CNT_SHIFT         7
+#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
+	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+	   register. */
+#define R300_RS_CNTL_1                      0x4304
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0                    0x4310
+#define R300_RS_INTERP_1                    0x4314
+#       define R300_RS_INTERP_1_UNKNOWN          0x40
+#define R300_RS_INTERP_2                    0x4318
+#       define R300_RS_INTERP_2_UNKNOWN          0x80
+#define R300_RS_INTERP_3                    0x431C
+#       define R300_RS_INTERP_3_UNKNOWN          0xC0
+#define R300_RS_INTERP_4                    0x4320
+#define R300_RS_INTERP_5                    0x4324
+#define R300_RS_INTERP_6                    0x4328
+#define R300_RS_INTERP_7                    0x432C
+#       define R300_RS_INTERP_SRC_SHIFT          2
+#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
+#       define R300_RS_INTERP_USED               0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0                     0x4330
+#define R300_RS_ROUTE_1                     0x4334
+#define R300_RS_ROUTE_2                     0x4338
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
+#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
+#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
+#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
+#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
+#       define R300_RS_ROUTE_DEST_SHIFT          6
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
+#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
+#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
+#		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
+#		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
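+
+/* Editorial sketch, not part of the original header: routing interpolator
+ * "interp" (one of the SOURCE_INTERP values) into fragment program register
+ * "dest". The helper name is illustrative, and the ENABLE and DEST fields
+ * are marked GUESS above.
+ */
+static inline u32 r300_rs_route_word(u32 interp, u32 dest)
+{
+	return interp | R300_RS_ROUTE_ENABLE |
+	       ((dest << R300_RS_ROUTE_DEST_SHIFT) & R300_RS_ROUTE_DEST_MASK);
+}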
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ                   0x43a4
+#	define R300_SC_HYPERZ_DISABLE     (0 << 0)
+#	define R300_SC_HYPERZ_ENABLE      (1 << 0)
+#	define R300_SC_HYPERZ_MIN         (0 << 1)
+#	define R300_SC_HYPERZ_MAX         (1 << 1)
+#	define R300_SC_HYPERZ_ADJ_256     (0 << 2)
+#	define R300_SC_HYPERZ_ADJ_128     (1 << 2)
+#	define R300_SC_HYPERZ_ADJ_64      (2 << 2)
+#	define R300_SC_HYPERZ_ADJ_32      (3 << 2)
+#	define R300_SC_HYPERZ_ADJ_16      (4 << 2)
+#	define R300_SC_HYPERZ_ADJ_8       (5 << 2)
+#	define R300_SC_HYPERZ_ADJ_4       (6 << 2)
+#	define R300_SC_HYPERZ_ADJ_2       (7 << 2)
+#	define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+#	define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
+
+#define R300_SC_EDGERULE                 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0               0x43B0
+#define R300_RE_CLIPRECT_BR_0               0x43B4
+#define R300_RE_CLIPRECT_TL_1               0x43B8
+#define R300_RE_CLIPRECT_BR_1               0x43BC
+#define R300_RE_CLIPRECT_TL_2               0x43C0
+#define R300_RE_CLIPRECT_BR_2               0x43C4
+#define R300_RE_CLIPRECT_TL_3               0x43C8
+#define R300_RE_CLIPRECT_BR_3               0x43CC
+#       define R300_CLIPRECT_OFFSET              1440
+#       define R300_CLIPRECT_MASK                0x1FFF
+#       define R300_CLIPRECT_X_SHIFT             0
+#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
+#       define R300_CLIPRECT_Y_SHIFT             13
+#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL               0x43D0
+#       define R300_CLIP_OUT                     (1 << 0)
+#       define R300_CLIP_0                       (1 << 1)
+#       define R300_CLIP_1                       (1 << 2)
+#       define R300_CLIP_10                      (1 << 3)
+#       define R300_CLIP_2                       (1 << 4)
+#       define R300_CLIP_20                      (1 << 5)
+#       define R300_CLIP_21                      (1 << 6)
+#       define R300_CLIP_210                     (1 << 7)
+#       define R300_CLIP_3                       (1 << 8)
+#       define R300_CLIP_30                      (1 << 9)
+#       define R300_CLIP_31                      (1 << 10)
+#       define R300_CLIP_310                     (1 << 11)
+#       define R300_CLIP_32                      (1 << 12)
+#       define R300_CLIP_320                     (1 << 13)
+#       define R300_CLIP_321                     (1 << 14)
+#       define R300_CLIP_3210                    (1 << 15)
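+
+/* Editorial sketch, not part of the original header: packing an (x, y)
+ * corner for the cliprect registers, applying the 1440 offset described
+ * above. The helper name is an assumption for illustration.
+ */
+static inline u32 r300_cliprect_corner(u32 x, u32 y)
+{
+	return (((x + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_X_SHIFT) &
+		R300_CLIPRECT_X_MASK) |
+	       (((y + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_Y_SHIFT) &
+		R300_CLIPRECT_Y_MASK);
+}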
+
+/* gap */
+
+#define R300_RE_SCISSORS_TL                 0x43E0
+#define R300_RE_SCISSORS_BR                 0x43E4
+#       define R300_SCISSORS_OFFSET              1440
+#       define R300_SCISSORS_X_SHIFT             0
+#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
+#       define R300_SCISSORS_Y_SHIFT             13
+#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
+#define R300_TX_FILTER_0                    0x4400
+#       define R300_TX_REPEAT                    0
+#       define R300_TX_MIRRORED                  1
+#       define R300_TX_CLAMP                     4
+#       define R300_TX_CLAMP_TO_EDGE             2
+#       define R300_TX_CLAMP_TO_BORDER           6
+#       define R300_TX_WRAP_S_SHIFT              0
+#       define R300_TX_WRAP_S_MASK               (7 << 0)
+#       define R300_TX_WRAP_T_SHIFT              3
+#       define R300_TX_WRAP_T_MASK               (7 << 3)
+#       define R300_TX_WRAP_Q_SHIFT              6
+#       define R300_TX_WRAP_Q_MASK               (7 << 6)
+#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
+#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
+#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
+#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
+#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+ * anisotropy modes because that would void the selected mag filter
+ */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
+#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
+#	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
+#	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
+#	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
+#	define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
+#	define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#	define R300_TX_MAX_ANISO_MASK    (14 << 21)
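+
+/* Editorial sketch, not part of the original header: a trilinear filter
+ * word with repeat wrapping on S and T, composed from the fields above.
+ */
+static inline u32 r300_tx_filter_trilinear_repeat(void)
+{
+	return (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT) |
+	       (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT) |
+	       R300_TX_MAG_FILTER_LINEAR |
+	       R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR;
+}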
+
+#define R300_TX_FILTER1_0                      0x4440
+#	define R300_CHROMA_KEY_MODE_DISABLE    0
+#	define R300_CHROMA_KEY_FORCE	       1
+#	define R300_CHROMA_KEY_BLEND           2
+#	define R300_MC_ROUND_NORMAL            (0<<2)
+#	define R300_MC_ROUND_MPEG4             (1<<2)
+#	define R300_LOD_BIAS_MASK	    0x1fff
+#	define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
+#	define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
+#	define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
+#	define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
+#	define R300_TX_TRI_PERF_0_8            (0<<15)
+#	define R300_TX_TRI_PERF_1_8            (1<<15)
+#	define R300_TX_TRI_PERF_1_4            (2<<15)
+#	define R300_TX_TRI_PERF_3_8            (3<<15)
+#	define R300_ANISO_THRESHOLD_MASK       (7<<17)
+
+#define R300_TX_SIZE_0                      0x4480
+#       define R300_TX_WIDTHMASK_SHIFT           0
+#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
+#       define R300_TX_HEIGHTMASK_SHIFT          11
+#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
+#       define R300_TX_UNK23                     (1 << 23)
+#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
+#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
+#       define R300_TX_SIZE_PROJECTED            (1<<30)
+#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
+#define R300_TX_FORMAT_0                    0x44C0
+	/* The interpretation of the format word by Wladimir van der Laan */
+	/* The X, Y, Z and W refer to the layout of the components.
+	   They are given meanings as R, G, B and Alpha by the swizzle
+	   specification */
+#	define R300_TX_FORMAT_X8		    0x0
+#	define R300_TX_FORMAT_X16		    0x1
+#	define R300_TX_FORMAT_Y4X4		    0x2
+#	define R300_TX_FORMAT_Y8X8		    0x3
+#	define R300_TX_FORMAT_Y16X16		    0x4
+#	define R300_TX_FORMAT_Z3Y3X2		    0x5
+#	define R300_TX_FORMAT_Z5Y6X5		    0x6
+#	define R300_TX_FORMAT_Z6Y5X5		    0x7
+#	define R300_TX_FORMAT_Z11Y11X10		    0x8
+#	define R300_TX_FORMAT_Z10Y11X11		    0x9
+#	define R300_TX_FORMAT_W4Z4Y4X4		    0xA
+#	define R300_TX_FORMAT_W1Z5Y5X5		    0xB
+#	define R300_TX_FORMAT_W8Z8Y8X8		    0xC
+#	define R300_TX_FORMAT_W2Z10Y10X10	    0xD
+#	define R300_TX_FORMAT_W16Z16Y16X16	    0xE
+#	define R300_TX_FORMAT_DXT1		    0xF
+#	define R300_TX_FORMAT_DXT3		    0x10
+#	define R300_TX_FORMAT_DXT5		    0x11
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8		    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8		    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8		    0x15     /* no swizzle */
+	/* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
+#	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26)
+
+	/* gap */
+	/* Floating point formats */
+	/* Note - hardware supports both 16 and 32 bit floating point */
+#	define R300_TX_FORMAT_FL_I16		    0x18
+#	define R300_TX_FORMAT_FL_I16A16		    0x19
+#	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A
+#	define R300_TX_FORMAT_FL_I32		    0x1B
+#	define R300_TX_FORMAT_FL_I32A32		    0x1C
+#	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
+#	define R300_TX_FORMAT_ATI2N		    0x1F
+	/* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+#	define R300_TX_FORMAT_ALPHA_1CH		    0x000
+#	define R300_TX_FORMAT_ALPHA_2CH		    0x200
+#	define R300_TX_FORMAT_ALPHA_4CH		    0x600
+#	define R300_TX_FORMAT_ALPHA_NONE	    0xA00
+	/* Swizzling */
+	/* constants */
+#	define R300_TX_FORMAT_X		0
+#	define R300_TX_FORMAT_Y		1
+#	define R300_TX_FORMAT_Z		2
+#	define R300_TX_FORMAT_W		3
+#	define R300_TX_FORMAT_ZERO	4
+#	define R300_TX_FORMAT_ONE	5
+	/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_Z	6
+	/* 2.0*W, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7
+
+#	define R300_TX_FORMAT_B_SHIFT	18
+#	define R300_TX_FORMAT_G_SHIFT	15
+#	define R300_TX_FORMAT_R_SHIFT	12
+#	define R300_TX_FORMAT_A_SHIFT	9
+	/* Convenience macro to take care of layout and swizzling */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
+		((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)		\
+		| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
+		| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
+		| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
+		| (R300_TX_FORMAT_##FMT)				\
+		)
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do. Take values from a
+	   constant color? */
+#	define R300_TX_FORMAT_CONST_X		(1<<5)
+#	define R300_TX_FORMAT_CONST_Y		(2<<5)
+#	define R300_TX_FORMAT_CONST_Z		(4<<5)
+#	define R300_TX_FORMAT_CONST_W		(8<<5)
+
+#	define R300_TX_FORMAT_YUV_MODE		0x00800000
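+
+/* Editorial example, not part of the original header: with the convenience
+ * macro above, a little-endian ARGB8888 texture can be described by mapping
+ * W8Z8Y8X8's X/Y/Z/W bytes onto B/G/R/A. The define below is illustrative
+ * only.
+ */
+#define R300_EXAMPLE_TX_FORMAT_ARGB8888 \
+	R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8)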
+
+#define R300_TX_PITCH_0			    0x4500 /* obviously missing in the gap */
+#define R300_TX_OFFSET_0                    0x4540
+	/* BEGIN: Guess from R200 */
+#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
+#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
+#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
+#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
+#       define R300_TXO_MACRO_TILE               (1 << 2)
+#       define R300_TXO_MICRO_TILE               (1 << 3)
+#       define R300_TXO_MICRO_TILE_SQUARE        (2 << 3)
+#       define R300_TXO_OFFSET_MASK              0xffffffe0
+#       define R300_TXO_OFFSET_SHIFT             5
+	/* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0                      0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0              0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have
+ * at least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0                     0x4600
+#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
+#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
+#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
+#define R300_PFS_CNTL_1                     0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2                     0x4608
+#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_SHIFT       6
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
+#       define R300_PFS_CNTL_TEX_END_SHIFT       18
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
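+/* A minimal sketch (not part of the original file) of packing CNTL_2 for
+ * a program with alu_count ALU and tex_count TEX instructions stored at
+ * offset 0; the *_END fields hold the index of the last instruction
+ * relative to the offset, hence the "- 1":
+ *
+ *   cntl2 = (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT) |
+ *           ((alu_count - 1) << R300_PFS_CNTL_ALU_END_SHIFT) |
+ *           (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT) |
+ *           ((tex_count - 1) << R300_PFS_CNTL_TEX_END_SHIFT);
+ */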
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_PFS_NODE_0                     0x4610
+#define R300_PFS_NODE_1                     0x4614
+#define R300_PFS_NODE_2                     0x4618
+#define R300_PFS_NODE_3                     0x461C
+#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_NODE_ALU_END_SHIFT       6
+#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
+#       define R300_PFS_NODE_TEX_END_SHIFT       17
+#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
+#		define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
+#		define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
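+/* Illustrative sketch (assumed, not from the original file), using the
+ * driver's WREG32() register write helper: a two-node program leaves
+ * NODE_0 and NODE_1 zeroed and stores its first node in NODE_2, its
+ * second (last) node in NODE_3:
+ *
+ *   WREG32(R300_PFS_NODE_0, 0);
+ *   WREG32(R300_PFS_NODE_1, 0);
+ *   WREG32(R300_PFS_NODE_2, first_node_bits);
+ *   WREG32(R300_PFS_NODE_3, last_node_bits | R300_PFS_NODE_OUTPUT_COLOR);
+ */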
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_PFS_TEXI_0                     0x4620
+#	define R300_FPITX_SRC_SHIFT              0
+#	define R300_FPITX_SRC_MASK               (31 << 0)
+	/* GUESS */
+#	define R300_FPITX_SRC_CONST              (1 << 5)
+#	define R300_FPITX_DST_SHIFT              6
+#	define R300_FPITX_DST_MASK               (31 << 6)
+#	define R300_FPITX_IMAGE_SHIFT            11
+	/* GUESS based on layout and native limits */
+#       define R300_FPITX_IMAGE_MASK             (15 << 11)
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#	define R300_FPITX_OPCODE_SHIFT		15
+#		define R300_FPITX_OP_TEX	1
+#		define R300_FPITX_OP_KIL	2
+#		define R300_FPITX_OP_TXP	3
+#		define R300_FPITX_OP_TXB	4
+#	define R300_FPITX_OPCODE_MASK           (7 << 15)
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the
+ * order in which fglrx writes them. I assume there is space for 64
+ * instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ *  - DP4: Use OUTC_DP4, OUTA_DP4
+ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ *  - FLR: use FRC+MAD
+ *  - XPD: use MAD+MAD
+ *  - SGE, SLT: use MAD+CMP
+ *  - RSQ: use ABS modifier for argument
+ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *    (e.g. RCP) into color register
+ *  - apparently, there's no quick DST operation
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their source
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ *  - Argument order is the same as in ARB_fragment_program.
+ *  - Operation is MAD
+ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ *  - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_PFS_INSTR1_0                   0x46C0
+#       define R300_FPI1_SRC0C_SHIFT             0
+#       define R300_FPI1_SRC0C_MASK              (31 << 0)
+#       define R300_FPI1_SRC0C_CONST             (1 << 5)
+#       define R300_FPI1_SRC1C_SHIFT             6
+#       define R300_FPI1_SRC1C_MASK              (31 << 6)
+#       define R300_FPI1_SRC1C_CONST             (1 << 11)
+#       define R300_FPI1_SRC2C_SHIFT             12
+#       define R300_FPI1_SRC2C_MASK              (31 << 12)
+#       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_SRC_MASK                0x0003ffff
+#       define R300_FPI1_DSTC_SHIFT              18
+#       define R300_FPI1_DSTC_MASK               (31 << 18)
+#		define R300_FPI1_DSTC_REG_MASK_SHIFT     23
+#       define R300_FPI1_DSTC_REG_X              (1 << 23)
+#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
+#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
+#		define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
+#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
+#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
+#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
+
+#define R300_PFS_INSTR3_0                   0x47C0
+#       define R300_FPI3_SRC0A_SHIFT             0
+#       define R300_FPI3_SRC0A_MASK              (31 << 0)
+#       define R300_FPI3_SRC0A_CONST             (1 << 5)
+#       define R300_FPI3_SRC1A_SHIFT             6
+#       define R300_FPI3_SRC1A_MASK              (31 << 6)
+#       define R300_FPI3_SRC1A_CONST             (1 << 11)
+#       define R300_FPI3_SRC2A_SHIFT             12
+#       define R300_FPI3_SRC2A_MASK              (31 << 12)
+#       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_SRC_MASK                0x0003ffff
+#       define R300_FPI3_DSTA_SHIFT              18
+#       define R300_FPI3_DSTA_MASK               (31 << 18)
+#       define R300_FPI3_DSTA_REG                (1 << 23)
+#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
+#		define R300_FPI3_DSTA_DEPTH              (1 << 27)
+
+#define R300_PFS_INSTR0_0                   0x48C0
+#       define R300_FPI0_ARGC_SRC0C_XYZ          0
+#       define R300_FPI0_ARGC_SRC0C_XXX          1
+#       define R300_FPI0_ARGC_SRC0C_YYY          2
+#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
+#       define R300_FPI0_ARGC_SRC1C_XYZ          4
+#       define R300_FPI0_ARGC_SRC1C_XXX          5
+#       define R300_FPI0_ARGC_SRC1C_YYY          6
+#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
+#       define R300_FPI0_ARGC_SRC2C_XYZ          8
+#       define R300_FPI0_ARGC_SRC2C_XXX          9
+#       define R300_FPI0_ARGC_SRC2C_YYY          10
+#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
+#       define R300_FPI0_ARGC_SRC0A              12
+#       define R300_FPI0_ARGC_SRC1A              13
+#       define R300_FPI0_ARGC_SRC2A              14
+#       define R300_FPI0_ARGC_SRC1C_LRP          15
+#       define R300_FPI0_ARGC_ZERO               20
+#       define R300_FPI0_ARGC_ONE                21
+	/* GUESS */
+#       define R300_FPI0_ARGC_HALF               22
+#       define R300_FPI0_ARGC_SRC0C_YZX          23
+#       define R300_FPI0_ARGC_SRC1C_YZX          24
+#       define R300_FPI0_ARGC_SRC2C_YZX          25
+#       define R300_FPI0_ARGC_SRC0C_ZXY          26
+#       define R300_FPI0_ARGC_SRC1C_ZXY          27
+#       define R300_FPI0_ARGC_SRC2C_ZXY          28
+#       define R300_FPI0_ARGC_SRC0CA_WZY         29
+#       define R300_FPI0_ARGC_SRC1CA_WZY         30
+#       define R300_FPI0_ARGC_SRC2CA_WZY         31
+
+#       define R300_FPI0_ARG0C_SHIFT             0
+#       define R300_FPI0_ARG0C_MASK              (31 << 0)
+#       define R300_FPI0_ARG0C_NEG               (1 << 5)
+#       define R300_FPI0_ARG0C_ABS               (1 << 6)
+#       define R300_FPI0_ARG1C_SHIFT             7
+#       define R300_FPI0_ARG1C_MASK              (31 << 7)
+#       define R300_FPI0_ARG1C_NEG               (1 << 12)
+#       define R300_FPI0_ARG1C_ABS               (1 << 13)
+#       define R300_FPI0_ARG2C_SHIFT             14
+#       define R300_FPI0_ARG2C_MASK              (31 << 14)
+#       define R300_FPI0_ARG2C_NEG               (1 << 19)
+#       define R300_FPI0_ARG2C_ABS               (1 << 20)
+#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI0_OUTC_MAD                (0 << 23)
+#       define R300_FPI0_OUTC_DP3                (1 << 23)
+#       define R300_FPI0_OUTC_DP4                (2 << 23)
+#       define R300_FPI0_OUTC_MIN                (4 << 23)
+#       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMPH               (7 << 23)
+#       define R300_FPI0_OUTC_CMP                (8 << 23)
+#       define R300_FPI0_OUTC_FRC                (9 << 23)
+#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
+#       define R300_FPI0_OUTC_SAT                (1 << 30)
+#       define R300_FPI0_INSERT_NOP              (1 << 31)
+
+#define R300_PFS_INSTR2_0                   0x49C0
+#       define R300_FPI2_ARGA_SRC0C_X            0
+#       define R300_FPI2_ARGA_SRC0C_Y            1
+#       define R300_FPI2_ARGA_SRC0C_Z            2
+#       define R300_FPI2_ARGA_SRC1C_X            3
+#       define R300_FPI2_ARGA_SRC1C_Y            4
+#       define R300_FPI2_ARGA_SRC1C_Z            5
+#       define R300_FPI2_ARGA_SRC2C_X            6
+#       define R300_FPI2_ARGA_SRC2C_Y            7
+#       define R300_FPI2_ARGA_SRC2C_Z            8
+#       define R300_FPI2_ARGA_SRC0A              9
+#       define R300_FPI2_ARGA_SRC1A              10
+#       define R300_FPI2_ARGA_SRC2A              11
+#       define R300_FPI2_ARGA_SRC1A_LRP          15
+#       define R300_FPI2_ARGA_ZERO               16
+#       define R300_FPI2_ARGA_ONE                17
+	/* GUESS */
+#       define R300_FPI2_ARGA_HALF               18
+#       define R300_FPI2_ARG0A_SHIFT             0
+#       define R300_FPI2_ARG0A_MASK              (31 << 0)
+#       define R300_FPI2_ARG0A_NEG               (1 << 5)
+	/* GUESS */
+#	define R300_FPI2_ARG0A_ABS		 (1 << 6)
+#       define R300_FPI2_ARG1A_SHIFT             7
+#       define R300_FPI2_ARG1A_MASK              (31 << 7)
+#       define R300_FPI2_ARG1A_NEG               (1 << 12)
+	/* GUESS */
+#	define R300_FPI2_ARG1A_ABS		 (1 << 13)
+#       define R300_FPI2_ARG2A_SHIFT             14
+#       define R300_FPI2_ARG2A_MASK              (31 << 14)
+#       define R300_FPI2_ARG2A_NEG               (1 << 19)
+	/* GUESS */
+#	define R300_FPI2_ARG2A_ABS		 (1 << 20)
+#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI2_OUTA_MAD                (0 << 23)
+#       define R300_FPI2_OUTA_DP4                (1 << 23)
+#       define R300_FPI2_OUTA_MIN                (2 << 23)
+#       define R300_FPI2_OUTA_MAX                (3 << 23)
+#       define R300_FPI2_OUTA_CMP                (6 << 23)
+#       define R300_FPI2_OUTA_FRC                (7 << 23)
+#       define R300_FPI2_OUTA_EX2                (8 << 23)
+#       define R300_FPI2_OUTA_LG2                (9 << 23)
+#       define R300_FPI2_OUTA_RCP                (10 << 23)
+#       define R300_FPI2_OUTA_RSQ                (11 << 23)
+#       define R300_FPI2_OUTA_SAT                (1 << 30)
+#       define R300_FPI2_UNKNOWN_31              (1 << 31)
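+/* Worked example (a sketch, not from the original file): the color half
+ * of "MAD r2, r0, r1, 0", with the unused third source filled with
+ * constant 0 as fglrx does; the alpha half in INSTR2/INSTR3 follows the
+ * same pattern.
+ *
+ *   instr1 = (0 << R300_FPI1_SRC0C_SHIFT) |
+ *            (1 << R300_FPI1_SRC1C_SHIFT) |
+ *            (0 << R300_FPI1_SRC2C_SHIFT) | R300_FPI1_SRC2C_CONST |
+ *            (2 << R300_FPI1_DSTC_SHIFT) |
+ *            R300_FPI1_DSTC_REG_X | R300_FPI1_DSTC_REG_Y |
+ *            R300_FPI1_DSTC_REG_Z;
+ *   instr0 = (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
+ *            (R300_FPI0_ARGC_SRC1C_XYZ << R300_FPI0_ARG1C_SHIFT) |
+ *            (R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT) |
+ *            R300_FPI0_OUTC_MAD;
+ */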
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE                   0x4BC0
+#       define R300_FOG_ENABLE                   (1 << 0)
+#	define R300_FOG_MODE_LINEAR              (0 << 1)
+#	define R300_FOG_MODE_EXP                 (1 << 1)
+#	define R300_FOG_MODE_EXP2                (2 << 1)
+#	define R300_FOG_MODE_MASK                (3 << 1)
+#define R300_FOG_COLOR_R                    0x4BC8
+#define R300_FOG_COLOR_G                    0x4BCC
+#define R300_FOG_COLOR_B                    0x4BD0
+
+#define R300_PP_ALPHA_TEST                  0x4BD4
+#       define R300_REF_ALPHA_MASK               0x000000ff
+#       define R300_ALPHA_TEST_FAIL              (0 << 8)
+#       define R300_ALPHA_TEST_LESS              (1 << 8)
+#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
+#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
+#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
+#       define R300_ALPHA_TEST_GREATER           (4 << 8)
+#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
+#       define R300_ALPHA_TEST_PASS              (7 << 8)
+#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
+#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X                  0x4C00
+#define R300_PFS_PARAM_0_Y                  0x4C04
+#define R300_PFS_PARAM_0_Z                  0x4C08
+#define R300_PFS_PARAM_0_W                  0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X                 0x4DF0
+#define R300_PFS_PARAM_31_Y                 0x4DF4
+#define R300_PFS_PARAM_31_Z                 0x4DF8
+#define R300_PFS_PARAM_31_W                 0x4DFC
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ *   the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND                    0x4E04
+#define R300_RB3D_ABLEND                    0x4E08
+/* the following only appear in CBLEND */
+#       define R300_BLEND_ENABLE                     (1 << 0)
+#       define R300_BLEND_UNKNOWN                    (3 << 1)
+#       define R300_BLEND_NO_SEPARATE                (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+#       define R300_FCN_MASK                         (3  << 12)
+#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define R300_COMB_FCN_MIN                     (4  << 12)
+#       define R300_COMB_FCN_MAX                     (5  << 12)
+#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
+#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
+#       define R300_BLEND_GL_ZERO                    (32)
+#       define R300_BLEND_GL_ONE                     (33)
+#       define R300_BLEND_GL_SRC_COLOR               (34)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
+#       define R300_BLEND_GL_DST_COLOR               (36)
+#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
+#       define R300_BLEND_GL_SRC_ALPHA               (38)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
+#       define R300_BLEND_GL_DST_ALPHA               (40)
+#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
+#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
+#       define R300_BLEND_GL_CONST_COLOR             (43)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
+#       define R300_BLEND_GL_CONST_ALPHA             (45)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
+#       define R300_BLEND_MASK                       (63)
+#       define R300_SRC_BLEND_SHIFT                  (16)
+#       define R300_DST_BLEND_SHIFT                  (24)
+#define R300_RB3D_BLEND_COLOR               0x4E10
+#define R300_RB3D_COLORMASK                 0x4E0C
+#       define R300_COLORMASK0_B                 (1<<0)
+#       define R300_COLORMASK0_G                 (1<<1)
+#       define R300_COLORMASK0_R                 (1<<2)
+#       define R300_COLORMASK0_A                 (1<<3)
+
+/* gap */
+
+#define R300_RB3D_COLOROFFSET0              0x4E28
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+
+/* gap */
+
+/* Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like mode, but with some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0               0x4E38
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
+#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
+
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
+#define R300_RB3D_AARESOLVE_CTL             0x4E88
+/* gap */
+
+/* Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
+#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
+#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so plain write is 6.  - vd
+ */
+#define R300_ZB_CNTL                             0x4F00
+#	define R300_STENCIL_ENABLE		 (1 << 0)
+#	define R300_Z_ENABLE		         (1 << 1)
+#	define R300_Z_WRITE_ENABLE		 (1 << 2)
+#	define R300_Z_SIGNED_COMPARE		 (1 << 3)
+#	define R300_STENCIL_FRONT_BACK		 (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL                   0x4f04
+	/* functions */
+#	define R300_ZS_NEVER			0
+#	define R300_ZS_LESS			1
+#	define R300_ZS_LEQUAL			2
+#	define R300_ZS_EQUAL			3
+#	define R300_ZS_GEQUAL			4
+#	define R300_ZS_GREATER			5
+#	define R300_ZS_NOTEQUAL			6
+#	define R300_ZS_ALWAYS			7
+#       define R300_ZS_MASK                     7
+	/* operations */
+#	define R300_ZS_KEEP			0
+#	define R300_ZS_ZERO			1
+#	define R300_ZS_REPLACE			2
+#	define R300_ZS_INCR			3
+#	define R300_ZS_DECR			4
+#	define R300_ZS_INVERT			5
+#	define R300_ZS_INCR_WRAP		6
+#	define R300_ZS_DECR_WRAP		7
+#	define R300_Z_FUNC_SHIFT		0
+	/* front and back refer to operations done for front
+	   and back faces, i.e. separate stencil function support */
+#	define R300_S_FRONT_FUNC_SHIFT	        3
+#	define R300_S_FRONT_SFAIL_OP_SHIFT	6
+#	define R300_S_FRONT_ZPASS_OP_SHIFT	9
+#	define R300_S_FRONT_ZFAIL_OP_SHIFT      12
+#	define R300_S_BACK_FUNC_SHIFT           15
+#	define R300_S_BACK_SFAIL_OP_SHIFT       18
+#	define R300_S_BACK_ZPASS_OP_SHIFT       21
+#	define R300_S_BACK_ZFAIL_OP_SHIFT       24
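+/* Worked example (an assumption, not from the original file): a LEQUAL
+ * depth test plus a front stencil that always passes, keeps on sfail and
+ * zfail, and replaces on zpass:
+ *
+ *   zs = (R300_ZS_LEQUAL << R300_Z_FUNC_SHIFT) |
+ *        (R300_ZS_ALWAYS << R300_S_FRONT_FUNC_SHIFT) |
+ *        (R300_ZS_KEEP << R300_S_FRONT_SFAIL_OP_SHIFT) |
+ *        (R300_ZS_REPLACE << R300_S_FRONT_ZPASS_OP_SHIFT) |
+ *        (R300_ZS_KEEP << R300_S_FRONT_ZFAIL_OP_SHIFT);
+ */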
+
+#define R300_ZB_STENCILREFMASK                        0x4f08
+#	define R300_STENCILREF_SHIFT       0
+#	define R300_STENCILREF_MASK        0x000000ff
+#	define R300_STENCILMASK_SHIFT      8
+#	define R300_STENCILMASK_MASK       0x0000ff00
+#	define R300_STENCILWRITEMASK_SHIFT 16
+#	define R300_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* gap */
+
+#define R300_ZB_FORMAT                             0x4f10
+#	define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
+#	define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
+#	define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
+/* reserved up to (15 << 0) */
+#	define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
+#	define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP                             0x4F14
+#	define R300_ZTOP_DISABLE                 (0 << 0)
+#	define R300_ZTOP_ENABLE                  (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
+
+#define R300_ZB_BW_CNTL                     0x4f1c
+#	define R300_HIZ_DISABLE                              (0 << 0)
+#	define R300_HIZ_ENABLE                               (1 << 0)
+#	define R300_HIZ_MIN                                  (0 << 1)
+#	define R300_HIZ_MAX                                  (1 << 1)
+#	define R300_FAST_FILL_DISABLE                        (0 << 2)
+#	define R300_FAST_FILL_ENABLE                         (1 << 2)
+#	define R300_RD_COMP_DISABLE                          (0 << 3)
+#	define R300_RD_COMP_ENABLE                           (1 << 3)
+#	define R300_WR_COMP_DISABLE                          (0 << 4)
+#	define R300_WR_COMP_ENABLE                           (1 << 4)
+#	define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
+#	define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
+
+#	define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
+#	define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
+#	define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
+#	define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
+
+#	define R500_BMASK_ENABLE                             (0 << 10)
+#	define R500_BMASK_DISABLE                            (1 << 10)
+#	define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
+#	define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
+#	define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
+#	define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
+#	define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
+#	define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
+#	define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
+#	define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
+#	define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
+#	define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
+#	define R500_PEQ_PACKING_DISABLE                      (0 << 18)
+#	define R500_PEQ_PACKING_ENABLE                       (1 << 18)
+#	define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
+#	define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET               0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH                0x4f24
+#       define R300_DEPTHPITCH_MASK              0x00003FFC
+#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
+#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
+#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
+#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
+#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
+#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
+#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
+#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
+
+#define R300_ZB_ZMASK_OFFSET			 0x4f30
+#define R300_ZB_ZMASK_PITCH			 0x4f34
+#define R300_ZB_ZMASK_WRINDEX			 0x4f38
+#define R300_ZB_ZMASK_DWORD			 0x4f3c
+#define R300_ZB_ZMASK_RDINDEX			 0x4f40
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET                       0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX                      0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD                        0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX                      0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH                        0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA                       0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR                       0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
+#	define R300_DEPTHX_OFFSET_SHIFT  1
+#	define R300_DEPTHX_OFFSET_MASK   0x000007FE
+#	define R300_DEPTHY_OFFSET_SHIFT  17
+#	define R300_DEPTHY_OFFSET_MASK   0x07FE0000
+
+/* Sets the FIFO sizes */
+#define R500_ZB_FIFO_SIZE                        0x4fd0
+#	define R500_OP_FIFO_SIZE_FULL   (0 << 0)
+#	define R500_OP_FIFO_SIZE_HALF   (1 << 0)
+#	define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+#	define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF                0x4fd4
+#	define R500_STENCILREF_SHIFT       0
+#	define R500_STENCILREF_MASK        0x000000ff
+#	define R500_STENCILMASK_SHIFT      8
+#	define R500_STENCILMASK_MASK       0x0000ff00
+#	define R500_STENCILWRITEMASK_SHIFT 16
+#	define R500_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ *  DWORD 0: output and opcode
+ *  DWORD 1: first argument
+ *  DWORD 2: second argument
+ *  DWORD 3: third argument
+ *
+ * Notes:
+ *  - ABS r, a is implemented as MAX r, a, -a
+ *  - MOV is implemented as ADD to zero
+ *  - XPD is implemented as MUL + MAD
+ *  - FLR is implemented as FRC + ADD
+ *  - apparently, fglrx tries to schedule instructions so that there is at
+ *    least one instruction between the write to a temporary and the first
+ *    read from said temporary; however, violations of this scheduling are
+ *    allowed
+ *  - register indices seem to be unrelated to OpenGL aliasing of
+ *    conventional state
+ *  - only one attribute and one parameter can be loaded at a time; however,
+ *    the same attribute/parameter can be used for more than one argument
+ *  - the second software argument for POW is the third hardware argument
+ *    (no idea why)
+ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ *   The single argument is replicated across all three inputs, but swizzled:
+ *     First argument: xyzy
+ *     Second argument: xyzx
+ *     Third argument: xyzw
+ *   Whenever the result is used later in the fragment program, fglrx forces
+ *   x and w to be 1.0 in the input selection; I don't know whether this is
+ *   strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT                     (1 << 0)
+#define R300_VPI_OUT_OP_MUL                     (2 << 0)
+#define R300_VPI_OUT_OP_ADD                     (3 << 0)
+#define R300_VPI_OUT_OP_MAD                     (4 << 0)
+#define R300_VPI_OUT_OP_DST                     (5 << 0)
+#define R300_VPI_OUT_OP_FRC                     (6 << 0)
+#define R300_VPI_OUT_OP_MAX                     (7 << 0)
+#define R300_VPI_OUT_OP_MIN                     (8 << 0)
+#define R300_VPI_OUT_OP_SGE                     (9 << 0)
+#define R300_VPI_OUT_OP_SLT                     (10 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
+#define R300_VPI_OUT_OP_ARL                     (13 << 0)
+#define R300_VPI_OUT_OP_EXP                     (65 << 0)
+#define R300_VPI_OUT_OP_LOG                     (66 << 0)
+	/* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
+#define R300_VPI_OUT_OP_LIT                     (68 << 0)
+#define R300_VPI_OUT_OP_POW                     (69 << 0)
+#define R300_VPI_OUT_OP_RCP                     (70 << 0)
+#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
+#define R300_VPI_OUT_OP_EX2                     (75 << 0)
+#define R300_VPI_OUT_OP_LG2                     (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
+	/* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT            13
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
+
+#define R300_VPI_OUT_WRITE_X                    (1 << 20)
+#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
+#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
+#define R300_VPI_OUT_WRITE_W                    (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
+
+#define R300_VPI_IN_REG_INDEX_SHIFT             5
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
+
+/* The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
+#define R300_VPI_IN_SELECT_X    0
+#define R300_VPI_IN_SELECT_Y    1
+#define R300_VPI_IN_SELECT_Z    2
+#define R300_VPI_IN_SELECT_W    3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE  5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT                     13
+#define R300_VPI_IN_Y_SHIFT                     16
+#define R300_VPI_IN_Z_SHIFT                     19
+#define R300_VPI_IN_W_SHIFT                     22
+
+#define R300_VPI_IN_NEG_X                       (1 << 25)
+#define R300_VPI_IN_NEG_Y                       (1 << 26)
+#define R300_VPI_IN_NEG_Z                       (1 << 27)
+#define R300_VPI_IN_NEG_W                       (1 << 28)
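+/* Sketch of the "MOV is implemented as ADD to zero" note above (assumed,
+ * not from the original file): copying attribute 0 into temporary 0.
+ *
+ *   dw0 = R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY |
+ *         (0 << R300_VPI_OUT_REG_INDEX_SHIFT) |
+ *         R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y |
+ *         R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
+ *   dw1 = R300_VPI_IN_REG_CLASS_ATTRIBUTE |
+ *         (0 << R300_VPI_IN_REG_INDEX_SHIFT) |
+ *         (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
+ *         (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
+ *         (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
+ *         (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
+ *   dw2 = R300_VPI_IN_REG_CLASS_NONE |
+ *         (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT) |
+ *         (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT) |
+ *         (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT) |
+ *         (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT);
+ *   dw3 = dw2;   unused third argument for ADD
+ */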
+/* END: Vertex program instruction set */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE                     (0 << 0)
+#define R300_PRIM_TYPE_POINT                    (1 << 0)
+#define R300_PRIM_TYPE_LINE                     (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
+	/* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
+#define R300_PRIM_TYPE_QUADS                    (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
+#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
+#define R300_PRIM_TYPE_MASK                     0xF
+#define R300_PRIM_WALK_IND                      (1 << 4)
+#define R300_PRIM_WALK_LIST                     (2 << 4)
+#define R300_PRIM_WALK_RING                     (3 << 4)
+#define R300_PRIM_WALK_MASK                     (3 << 4)
+	/* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT            16
+#define R300_PRIM_NUM_VERTICES_MASK             0xffff
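+/* Example composition (not from the original file): an indexed triangle
+ * list of n vertices would be described by
+ *
+ *   R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
+ *   (n << R300_PRIM_NUM_VERTICES_SHIFT)
+ */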
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to always be 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less
+ * significant word refers to the first array, the more significant
+ * word to the second array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
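+/* Sketch of the layout described above for two arrays (names are
+ * hypothetical, not from the original file):
+ *
+ *   dw0: 2                                  number of arrays
+ *   dw1: (size0 | (stride0 << 8)) |         first array in the low word
+ *        ((size1 | (stride1 << 8)) << 16)   second array in the high word
+ *   dw2: gpu address of array 0
+ *   dw3: gpu address of array 1
+ */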
+
+#define R300_PACKET3_INDX_BUFFER            0x00003300
+#    define R300_EB_UNK1_SHIFT                      24
+#    define R300_EB_UNK1                    (0x80<<24)
+#    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
+#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8	2
+#define R300_CP_COLOR_FORMAT_ARGB1555	3
+#define R300_CP_COLOR_FORMAT_RGB565	4
+#define R300_CP_COLOR_FORMAT_ARGB8888	6
+#define R300_CP_COLOR_FORMAT_RGB332	7
+#define R300_CP_COLOR_FORMAT_RGB8	9
+#define R300_CP_COLOR_FORMAT_ARGB4444	15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
+
+#define R500_VAP_INDEX_OFFSET		0x208c
+
+#define R500_GA_US_VECTOR_INDEX         0x4250
+#define R500_GA_US_VECTOR_DATA          0x4254
+
+#define R500_RS_IP_0                    0x4074
+#define R500_RS_INST_0                  0x4320
+
+#define R500_US_CONFIG                  0x4600
+
+#define R500_US_FC_CTRL			0x4624
+#define R500_US_CODE_ADDR		0x4630
+
+#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
+#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
+
+#define R300_SU_REG_DEST                0x42c8
+#define RV530_FG_ZBREG_DEST             0x4be8
+#define R300_ZB_ZPASS_DATA              0x4f58
+#define R300_ZB_ZPASS_ADDR              0x4f5c
+
+#endif /* _R300_REG_H */
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
new file mode 100644
index 0000000..ff229a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R300D_H__
+#define __R300D_H__
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_3D_CLEAR_CMASK		0x38
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
+
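+/* Usage sketch (REG_SET() comes from the driver's shared headers; the
+ * count field is the number of following dwords minus one, as far as I
+ * can tell from the driver):
+ *
+ *   PACKET0(0x4F00, 0)          header for a single write to 0x4F00
+ *   PACKET2(0)                  padding/no-op dword
+ *   PACKET3(PACKET3_NOP, 0)     type-3 NOP followed by one data dword
+ */
+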
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
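+/* Note on the naming convention below (inferred from the definitions):
+ * S_xxx_FIELD(v) shifts a value into field position, G_xxx_FIELD(r)
+ * extracts it from a register value, and C_xxx_FIELD is the mask that
+ * clears the field, e.g.:
+ *
+ *   reg = (reg & C_000148_MC_FB_START) | S_000148_MC_FB_START(start);
+ */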
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
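+
+/*
+ * Usage sketch (illustrative, not part of the original header): the
+ * S_/G_/C_ macros follow the usual pack/extract/clear pattern, so a
+ * read-modify-write of a single field looks like:
+ *
+ *	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ *	tmp &= C_00000D_SCLK_SRC_SEL;            clear the field
+ *	tmp |= S_00000D_SCLK_SRC_SEL(1);         set the new value
+ *	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+ */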
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 0000000..45e1d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r100d.h"
+#include "r420d.h"
+#include "r420_reg_safe.h"
+
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+	unsigned tmp;
+	unsigned gb_pipe_select;
+	unsigned num_pipes;
+
+	/* GA_ENHANCE workaround TCL deadlock issue */
+	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+	       (1 << 2) | (1 << 3));
+	/* add idle wait as per freedesktop.org bug 24041 */
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+	/* get max number of pipes */
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+	/* SE chips have 1 pipe */
+	if ((rdev->pdev->device == 0x5e4c) ||
+	    (rdev->pdev->device == 0x5e4f))
+		num_pipes = 1;
+
+	rdev->num_gb_pipes = num_pipes;
+	tmp = 0;
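+	/* The encodings below match R300_PIPE_COUNT_* in r500_reg.h:
+	 * RV350 = (0 << 1), R300 = (3 << 1), R420_3P = (6 << 1),
+	 * R420 = (7 << 1). */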
+	switch (num_pipes) {
+	default:
+		/* force to 1 pipe */
+		num_pipes = 1;
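+		/* fall through */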
+	case 1:
+		tmp = (0 << 1);
+		break;
+	case 2:
+		tmp = (3 << 1);
+		break;
+	case 3:
+		tmp = (6 << 1);
+		break;
+	case 4:
+		tmp = (7 << 1);
+		break;
+	}
+	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
+	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
+	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+	WREG32(R300_GB_TILE_CONFIG, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       RREG32(R300_RB2D_DSTCACHE_MODE) |
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->family == CHIP_RV530) {
+		tmp = RREG32(RV530_GB_PIPE_SELECT2);
+		if ((tmp & 3) == 3)
+			rdev->num_z_pipes = 2;
+		else
+			rdev->num_z_pipes = 1;
+	} else {
+		rdev->num_z_pipes = 1;
+	}
+
+	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
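+/* MC registers sit behind an index/data pair: the target address is
+ * written to R_0001F8_MC_IND_INDEX (with MC_IND_WR_EN set for writes)
+ * and the payload moves through R_0001FC_MC_IND_DATA, serialized by
+ * mc_idx_lock. */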
+u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0001FC_MC_IND_DATA);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
+}
+
+void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
+		S_0001F8_MC_IND_WR_EN(1));
+	WREG32(R_0001FC_MC_IND_DATA, v);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
+
+static void r420_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (r420_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+}
+
+static void r420_clock_resume(struct radeon_device *rdev)
+{
+	u32 sclk_cntl;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
+	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if (rdev->family == CHIP_R420)
+		sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+}
+
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+	int r;
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* RV410 and R420 can lock up if CP DMA to host memory happens
+	 * while the 2D engine is busy.
+	 *
+	 * The proper workaround is to queue a RESYNC at the beginning
+	 * of the CP init, apparently.
+	 */
+	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+	r = radeon_ring_lock(rdev, ring, 8);
+	WARN_ON(r);
+	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+	int r;
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* Catch the RESYNC we dispatched all the way back,
+	 * at the very beginning of the CP init.
+	 */
+	r = radeon_ring_lock(rdev, ring, 8);
+	WARN_ON(r);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
+static int r420_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r420_clock_resume(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r420_pipes_init(rdev);
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+	r420_cp_errata_init(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r420_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure GART is not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r420_clock_resume(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (rdev->is_atom_bios) {
+		atom_asic_init(rdev->mode_info.atom_context);
+	} else {
+		radeon_combios_asic_init(rdev->ddev);
+	}
+	/* Resume clock after posting */
+	r420_clock_resume(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r420_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	r420_cp_errata_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r420_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	if (rdev->is_atom_bios) {
+		radeon_atombios_fini(rdev);
+	} else {
+		radeon_combios_fini(rdev);
+	}
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int r420_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r) {
+			return r;
+		}
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r) {
+			return r;
+		}
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	r420_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r) {
+		return r;
+	}
+	if (rdev->family == CHIP_R420)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r420_set_reg_safe(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(R400_GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(R300_GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
+#else
+	return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h
new file mode 100644
index 0000000..fc78d31
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420d.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R420D_H
+#define R420D_H
+
+#define R_0001F8_MC_IND_INDEX                        0x0001F8
+#define   S_0001F8_MC_IND_ADDR(x)                      (((x) & 0x7F) << 0)
+#define   G_0001F8_MC_IND_ADDR(x)                      (((x) >> 0) & 0x7F)
+#define   C_0001F8_MC_IND_ADDR                         0xFFFFFF80
+#define   S_0001F8_MC_IND_WR_EN(x)                     (((x) & 0x1) << 8)
+#define   G_0001F8_MC_IND_WR_EN(x)                     (((x) >> 8) & 0x1)
+#define   C_0001F8_MC_IND_WR_EN                        0xFFFFFEFF
+#define R_0001FC_MC_IND_DATA                         0x0001FC
+#define   S_0001FC_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_0001FC_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0001FC_MC_IND_DATA                         0x00000000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+/* CLK registers */
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 0000000..136b7bc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,801 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE				0x4288
+#       define R300_FRONT_PTYPE_POINT                   (0 << 4)
+#       define R300_FRONT_PTYPE_LINE                    (1 << 4)
+#       define R300_FRONT_PTYPE_TRIANGE                 (2 << 4)
+#       define R300_BACK_PTYPE_POINT                    (0 << 7)
+#       define R300_BACK_PTYPE_LINE                     (1 << 7)
+#       define R300_BACK_PTYPE_TRIANGE                  (2 << 7)
+#define R300_GA_ROUND_MODE				0x428c
+#       define R300_GEOMETRY_ROUND_TRUNC                (0 << 0)
+#       define R300_GEOMETRY_ROUND_NEAREST              (1 << 0)
+#       define R300_COLOR_ROUND_TRUNC                   (0 << 2)
+#       define R300_COLOR_ROUND_NEAREST                 (1 << 2)
+#define R300_GB_MSPOS0				        0x4010
+#       define R300_MS_X0_SHIFT                         0
+#       define R300_MS_Y0_SHIFT                         4
+#       define R300_MS_X1_SHIFT                         8
+#       define R300_MS_Y1_SHIFT                         12
+#       define R300_MS_X2_SHIFT                         16
+#       define R300_MS_Y2_SHIFT                         20
+#       define R300_MSBD0_Y_SHIFT                       24
+#       define R300_MSBD0_X_SHIFT                       28
+#define R300_GB_MSPOS1				        0x4014
+#       define R300_MS_X3_SHIFT                         0
+#       define R300_MS_Y3_SHIFT                         4
+#       define R300_MS_X4_SHIFT                         8
+#       define R300_MS_Y4_SHIFT                         12
+#       define R300_MS_X5_SHIFT                         16
+#       define R300_MS_Y5_SHIFT                         20
+#       define R300_MSBD1_SHIFT                         24
+
+#define R300_GA_ENHANCE				        0x4274
+#       define R300_GA_DEADLOCK_CNTL                    (1 << 0)
+#       define R300_GA_FASTSYNC_CNTL                    (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT			0x4f18
+#       define R300_ZC_FLUSH                            (1 << 0)
+#       define R300_ZC_FREE                             (1 << 1)
+#       define R300_ZC_FLUSH_ALL                        0x3
+#define R400_GB_PIPE_SELECT             0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R500_SU_REG_DEST                0x42c8
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT		0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR	0xE70
+#define RADEON_RBBM_CMDFIFO_DATA	0xE74
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION		0x100
+#define		RS690_MC_FB_START_MASK		0x0000FFFF
+#define		RS690_MC_FB_START_SHIFT		0
+#define		RS690_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS690_MC_FB_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_LOCATION	0x101
+#define		RS690_MC_AGP_START_MASK		0x0000FFFF
+#define		RS690_MC_AGP_START_SHIFT	0
+#define		RS690_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS690_MC_AGP_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_BASE		0x102
+#define RS690_MCCFG_AGP_BASE_2		0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER            0x104
+#define RS690_HDP_FB_LOCATION		0x0134
+#define RS690_MC_INDEX				0x78
+#	define RS690_MC_INDEX_MASK		0x1ff
+#	define RS690_MC_INDEX_WR_EN		(1 << 9)
+#	define RS690_MC_INDEX_WR_ACK		0x7f
+#define RS690_MC_DATA				0x7c
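+/* Presumably accessed like the r420 MC helpers: the register address
+ * (plus RS690_MC_INDEX_WR_EN for writes) goes into RS690_MC_INDEX, the
+ * payload moves through RS690_MC_DATA, and writing RS690_MC_INDEX_WR_ACK
+ * back to the index register completes a write -- see rs690.c for the
+ * authoritative sequence. */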
+#define RS690_MC_STATUS                         0x90
+#define RS690_MC_STATUS_IDLE                    (1 << 0)
+#define RS480_AGP_BASE_2		0x0164
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1 << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
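+	/* aperture size is a 3-bit log2 code in bits [3:1]: 32MB << code */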
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH		0x3A
+#	define RS690_DIS_OUT_OF_PCI_GART_ACCESS	(1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS                         0x0
+#define RS600_MC_STATUS_IDLE                    (1 << 0)
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+#define RS600_MC_STATUS                         0x0
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_FB_LOCATION                    0x4
+#define		RS600_MC_FB_START_MASK		0x0000FFFF
+#define		RS600_MC_FB_START_SHIFT		0
+#define		RS600_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS600_MC_FB_TOP_SHIFT		16
+#define RS600_MC_AGP_LOCATION                   0x5
+#define		RS600_MC_AGP_START_MASK		0x0000FFFF
+#define		RS600_MC_AGP_START_SHIFT	0
+#define		RS600_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS600_MC_AGP_TOP_SHIFT		16
+#define RS600_MC_AGP_BASE                          0x6
+#define RS600_MC_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM		        (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+
+
+#define RV515_MC_FB_LOCATION		0x01
+#define		RV515_MC_FB_START_MASK		0x0000FFFF
+#define		RV515_MC_FB_START_SHIFT		0
+#define		RV515_MC_FB_TOP_MASK		0xFFFF0000
+#define		RV515_MC_FB_TOP_SHIFT		16
+#define RV515_MC_AGP_LOCATION		0x02
+#define		RV515_MC_AGP_START_MASK		0x0000FFFF
+#define		RV515_MC_AGP_START_SHIFT	0
+#define		RV515_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RV515_MC_AGP_TOP_SHIFT		16
+#define RV515_MC_AGP_BASE		0x03
+#define RV515_MC_AGP_BASE_2		0x04
+
+#define R520_MC_FB_LOCATION		0x04
+#define		R520_MC_FB_START_MASK		0x0000FFFF
+#define		R520_MC_FB_START_SHIFT		0
+#define		R520_MC_FB_TOP_MASK		0xFFFF0000
+#define		R520_MC_FB_TOP_SHIFT		16
+#define R520_MC_AGP_LOCATION		0x05
+#define		R520_MC_AGP_START_MASK		0x0000FFFF
+#define		R520_MC_AGP_START_SHIFT		0
+#define		R520_MC_AGP_TOP_MASK		0xFFFF0000
+#define		R520_MC_AGP_TOP_SHIFT		16
+#define R520_MC_AGP_BASE		0x06
+#define R520_MC_AGP_BASE_2		0x07
+
+
+#define AVIVO_MC_INDEX						0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER            0x09
+#define AVIVO_MC_DATA						0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_CNTL          0x5
+#	define RV515_MEM_NUM_CHANNELS_MASK  0x3
+#define R520_MC_CNTL0          0x8
+#	define R520_MEM_NUM_CHANNELS_MASK  (0x3 << 24)
+#	define R520_MEM_NUM_CHANNELS_SHIFT  24
+#	define R520_MC_CHANNEL_SIZE  (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL                              0x000f /* PLL */
+#       define AVIVO_CP_FORCEON                        (1 << 0)
+#define AVIVO_E2_DYN_CNTL                              0x0011 /* PLL */
+#       define AVIVO_E2_FORCEON                        (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL                            0x0013 /* PLL */
+#       define AVIVO_IDCT_FORCEON                      (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL				0x0300
+#       define AVIVO_VGA_VSTATUS_CNTL_MASK                      (3 << 16)
+#define AVIVO_D1VGA_CONTROL					0x0330
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+#       define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL					0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC                             0x400
+#define AVIVO_EXT1_PPLL_REF_DIV                                 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK                             0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL                             0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC                             0x410
+#define AVIVO_EXT2_PPLL_REF_DIV                                 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK                             0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL                             0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV                                   0x430
+#define AVIVO_EXT2_PPLL_FB_DIV                                   0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC                                 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV                                     0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC                                 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV                                     0x444
+
+#define AVIVO_EXT1_PPLL_CNTL                                    0x448
+#define AVIVO_EXT2_PPLL_CNTL                                    0x44c
+
+#define AVIVO_P1PLL_CNTL                                        0x450
+#define AVIVO_P2PLL_CNTL                                        0x454
+#define AVIVO_P1PLL_INT_SS_CNTL                                 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL                                 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL                                  0x460
+#define AVIVO_P2PLL_LVTMA_CNTL                                  0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL                                   0x480
+#define AVIVO_PCLK_CRTC2_CNTL                                   0x484
+
+#define AVIVO_D1CRTC_H_TOTAL					0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END                          0x6004
+#define AVIVO_D1CRTC_H_SYNC_A                                   0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL                              0x600c
+#define AVIVO_D1CRTC_H_SYNC_B                                   0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL                              0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL					0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END                          0x6024
+#define AVIVO_D1CRTC_V_SYNC_A                                   0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL                              0x602c
+#define AVIVO_D1CRTC_V_SYNC_B                                   0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL                              0x6034
+
+#define AVIVO_D1CRTC_CONTROL                                    0x6080
+#       define AVIVO_CRTC_EN                                    (1 << 0)
+#       define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE             (1 << 24)
+#define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STATUS                                     0x609c
+#       define AVIVO_D1CRTC_V_BLANK                             (1 << 0)
+#define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
+#define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT                            0x60ac
+#define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
+
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK                         0x60e0
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK                                0x60e8
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
+
+#define AVIVO_D1GRPH_ENABLE                                     0x6100
+#define AVIVO_D1GRPH_CONTROL                                    0x6104
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP                  (0 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP                 (1 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP                 (2 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP                 (3 << 0)
+
+#       define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED                (0 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_RGB565                (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444              (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_AI88                  (3 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_MONO16                (4 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010           (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL               (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010        (3 << 8)
+
+
+#       define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616          (0 << 8)
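+
+/* Illustrative only: the depth field (bits [1:0]) and the per-depth
+ * format field (at << 8) are OR'ed together, so a 32bpp ARGB scanout
+ * would use AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+ * AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; the actual programming lives in
+ * the modesetting code. */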
+
+#       define AVIVO_D1GRPH_SWAP_RB                             (1 << 16)
+#       define AVIVO_D1GRPH_TILED                               (1 << 20)
+#       define AVIVO_D1GRPH_MACRO_ADDRESS_MODE                  (1 << 21)
+
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa.  This applies to GRPH, CUR, etc.
+ */
+#define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#       define AVIVO_LUT_10BIT_BYPASS_EN                        (1 << 8)
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6114
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x611c
+#define AVIVO_D1GRPH_PITCH                                      0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X                           0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y                           0x6128
+#define AVIVO_D1GRPH_X_START                                    0x612c
+#define AVIVO_D1GRPH_Y_START                                    0x6130
+#define AVIVO_D1GRPH_X_END                                      0x6134
+#define AVIVO_D1GRPH_Y_END                                      0x6138
+#define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
+#       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
+
+#define AVIVO_D1CUR_CONTROL                     0x6400
+#       define AVIVO_D1CURSOR_EN                (1 << 0)
+#       define AVIVO_D1CURSOR_MODE_SHIFT        8
+#       define AVIVO_D1CURSOR_MODE_MASK         (3 << 8)
+#       define AVIVO_D1CURSOR_MODE_24BPP        2
+#define AVIVO_D1CUR_SURFACE_ADDRESS             0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH         0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH         0x640c
+#define AVIVO_D1CUR_SIZE                        0x6410
+#define AVIVO_D1CUR_POSITION                    0x6414
+#define AVIVO_D1CUR_HOT_SPOT                    0x6418
+#define AVIVO_D1CUR_UPDATE                      0x6424
+#       define AVIVO_D1CURSOR_UPDATE_LOCK       (1 << 16)
+
+#define AVIVO_DC_LUT_RW_SELECT                  0x6480
+#define AVIVO_DC_LUT_RW_MODE                    0x6484
+#define AVIVO_DC_LUT_RW_INDEX                   0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR                  0x648c
+#define AVIVO_DC_LUT_PWL_DATA                   0x6490
+#define AVIVO_DC_LUT_30_COLOR                   0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT           0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK              0x649c
+#define AVIVO_DC_LUT_AUTOFILL                   0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL                   0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE         0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN        0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED          0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE         0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN        0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED          0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT                0x6520
+#       define AVIVO_DC_LB_MEMORY_SPLIT_MASK    0x3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT   0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF  0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q    1
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY        2
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q    3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+#       define AVIVO_DC_LB_DISP1_END_ADR_SHIFT  4
+#       define AVIVO_DC_LB_DISP1_END_ADR_MASK   0x7ff
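+/* The split field apportions the shared display line buffer between the
+ * two CRTCs; one plausible policy (a sketch, not code from this driver):
+ *
+ *	u32 split = second_crtc_active ?
+ *		AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF :
+ *		AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
+ */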
+
+#define AVIVO_D1MODE_DATA_FORMAT                0x6528
+#       define AVIVO_D1MODE_INTERLEAVE_EN       (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT             0x652C
+#define AVIVO_D1MODE_VBLANK_STATUS              0x6534
+#       define AVIVO_VBLANK_ACK                 (1 << 4)
+#define AVIVO_D1MODE_VLINE_START_END            0x6538
+#define AVIVO_D1MODE_VLINE_STATUS               0x653c
+#       define AVIVO_D1MODE_VLINE_STAT          (1 << 12)
+#define AVIVO_DxMODE_INT_MASK                   0x6540
+#       define AVIVO_D1MODE_INT_MASK            (1 << 0)
+#       define AVIVO_D2MODE_INT_MASK            (1 << 8)
+#define AVIVO_D1MODE_VIEWPORT_START             0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE              0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM    0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE               0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL		0x6594
+#define AVIVO_D1SCL_UPDATE                      0x65cc
+#       define AVIVO_D1SCL_UPDATE_LOCK          (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL					0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END                          0x6804
+#define AVIVO_D2CRTC_H_SYNC_A                                   0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL                              0x680c
+#define AVIVO_D2CRTC_H_SYNC_B                                   0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL                              0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL					0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END                          0x6824
+#define AVIVO_D2CRTC_V_SYNC_A                                   0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL                              0x682c
+#define AVIVO_D2CRTC_V_SYNC_B                                   0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL                              0x6834
+
+#define AVIVO_D2CRTC_CONTROL                                    0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION                            0x68a0
+#define AVIVO_D2CRTC_FRAME_COUNT                                0x68a4
+#define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
+
+#define AVIVO_D2GRPH_ENABLE                                     0x6900
+#define AVIVO_D2GRPH_CONTROL                                    0x6904
+#define AVIVO_D2GRPH_LUT_SEL                                    0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS                    0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS                  0x6918
+#define AVIVO_D2GRPH_PITCH                                      0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X                           0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y                           0x6928
+#define AVIVO_D2GRPH_X_START                                    0x692c
+#define AVIVO_D2GRPH_Y_START                                    0x6930
+#define AVIVO_D2GRPH_X_END                                      0x6934
+#define AVIVO_D2GRPH_Y_END                                      0x6938
+#define AVIVO_D2GRPH_UPDATE                                     0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL                               0x6948
+
+#define AVIVO_D2CUR_CONTROL                     0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS             0x6c08
+#define AVIVO_D2CUR_SIZE                        0x6c10
+#define AVIVO_D2CUR_POSITION                    0x6c14
+
+#define AVIVO_D2MODE_VBLANK_STATUS              0x6d34
+#define AVIVO_D2MODE_VLINE_START_END            0x6d38
+#define AVIVO_D2MODE_VLINE_STATUS               0x6d3c
+#define AVIVO_D2MODE_VIEWPORT_START             0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE              0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM    0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE               0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL		0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL				0x7214
+
+#define AVIVO_DACA_ENABLE					0x7800
+#	define AVIVO_DAC_ENABLE				(1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT				0x7804
+#       define AVIVO_DAC_SOURCE_CRTC1                   (0 << 0)
+#       define AVIVO_DAC_SOURCE_CRTC2                   (1 << 0)
+#       define AVIVO_DAC_SOURCE_TV                      (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL				0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACA_POWERDOWN					0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_DACB_ENABLE					0x7a00
+#define AVIVO_DACB_SOURCE_SELECT				0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL				0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACB_POWERDOWN					0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_TMDSA_CNTL                    0x7880
+#   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_TMDSA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_TMDSA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT				0x7884
+/* 78a8 appears to be some kind of (reasonably tolerant) clock?
+ * 78d0 definitely hits the transmitter, definitely clock. */
+/* MYSTERY1 This appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL		0x7894
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL                  0x78d0
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE            0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE              0x7904
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE          (1 << 0)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE          (1 << 8)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK  (1 << 16)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL				0x7910
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE	(1 << 0)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET	(1 << 1)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT	(2)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL	        (1 << 4)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP          (1 << 5)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	(1 << 6)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK	        (1 << 8)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	(1 << 13)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK	        (1 << 14)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	(1 << 15)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL	(1 << 28)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA     (1 << 29)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL	(1 << 31)
+
+#define AVIVO_LVTMA_CNTL					0x7a80
+#   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_LVTMA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_LVTMA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT                               0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT                                0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL                           0x7a94
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL                  0x7ad0
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
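+
+/* The LVTMA register block moved down one dword on r6xx, hence the paired
+ * R500_/R600_ offsets below; callers pick the offset by family, e.g.:
+ *
+ *	reg = (rdev->family >= CHIP_R600) ?
+ *		R600_LVTMA_TRANSMITTER_ENABLE : R500_LVTMA_TRANSMITTER_ENABLE;
+ */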
+#define R500_LVTMA_CLOCK_ENABLE			0x7b00
+#define R600_LVTMA_CLOCK_ENABLE			0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE              0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE              0x7b08
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN            (1 << 5)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN             (1 << 9)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL			        0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL			        0x7b14
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE	  (1 << 0)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET	  (1 << 1)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL	          (1 << 4)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP            (1 << 5)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	  (1 << 6)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK	          (1 << 8)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	  (1 << 13)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK	          (1 << 14)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	  (1 << 15)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT  (16)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL	  (1 << 28)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA       (1 << 29)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL						0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL						0x7af4
+#	define AVIVO_LVTMA_PWRSEQ_EN					    (1 << 0)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK			    (1 << 2)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK			    (1 << 3)
+#	define AVIVO_LVTMA_PWRSEQ_TARGET_STATE				    (1 << 4)
+#	define AVIVO_LVTMA_SYNCEN					    (1 << 8)
+#	define AVIVO_LVTMA_SYNCEN_OVRD					    (1 << 9)
+#	define AVIVO_LVTMA_SYNCEN_POL					    (1 << 10)
+#	define AVIVO_LVTMA_DIGON					    (1 << 16)
+#	define AVIVO_LVTMA_DIGON_OVRD					    (1 << 17)
+#	define AVIVO_LVTMA_DIGON_POL					    (1 << 18)
+#	define AVIVO_LVTMA_BLON						    (1 << 24)
+#	define AVIVO_LVTMA_BLON_OVRD					    (1 << 25)
+#	define AVIVO_LVTMA_BLON_POL					    (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE                        0x7af4
+#define R600_LVTMA_PWRSEQ_STATE                        0x7af8
+#       define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R          (1 << 0)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DIGON                   (1 << 1)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN                  (1 << 2)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_BLON                    (1 << 3)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DONE                    (1 << 4)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT            (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL			0x7af8
+#	define AVIVO_LVDS_BACKLIGHT_CNTL_EN			(1 << 0)
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK		0x0000ff00
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT		8
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
+
+#define AVIVO_DC_GPIO_HPD_A                 0x7e94
+#define AVIVO_DC_GPIO_HPD_Y                 0x7e9c
+
+#define AVIVO_DC_I2C_STATUS1				0x7d30
+#	define AVIVO_DC_I2C_DONE			(1 << 0)
+#	define AVIVO_DC_I2C_NACK			(1 << 1)
+#	define AVIVO_DC_I2C_HALT			(1 << 2)
+#	define AVIVO_DC_I2C_GO			        (1 << 3)
+#define AVIVO_DC_I2C_RESET 				0x7d34
+#	define AVIVO_DC_I2C_SOFT_RESET			(1 << 0)
+#	define AVIVO_DC_I2C_ABORT			(1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 				0x7d38
+#	define AVIVO_DC_I2C_START			(1 << 0)
+#	define AVIVO_DC_I2C_STOP			(1 << 1)
+#	define AVIVO_DC_I2C_RECEIVE			(1 << 2)
+#	define AVIVO_DC_I2C_EN			        (1 << 8)
+#	define AVIVO_DC_I2C_PIN_SELECT(x)		((x) << 16)
+#	define AVIVO_SEL_DDC1			        0
+#	define AVIVO_SEL_DDC2			        1
+#	define AVIVO_SEL_DDC3			        2
+#define AVIVO_DC_I2C_CONTROL2 				0x7d3c
+#	define AVIVO_DC_I2C_ADDR_COUNT(x)		((x) << 0)
+#	define AVIVO_DC_I2C_DATA_COUNT(x)		((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 				0x7d40
+#	define AVIVO_DC_I2C_DATA_DRIVE_EN		(1 << 0)
+#	define AVIVO_DC_I2C_DATA_DRIVE_SEL		(1 << 1)
+#	define AVIVO_DC_I2C_CLK_DRIVE_EN		(1 << 7)
+#	define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x)      ((x) << 8)
+#	define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x)	((x) << 16)
+#	define AVIVO_DC_I2C_TIME_LIMIT(x)		((x) << 24)
+#define AVIVO_DC_I2C_DATA 				0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 			0x7d48
+#	define AVIVO_DC_I2C_INTERRUPT_STATUS		(1 << 0)
+#	define AVIVO_DC_I2C_INTERRUPT_AK		(1 << 8)
+#	define AVIVO_DC_I2C_INTERRUPT_ENABLE		(1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 			0x7d50
+#	define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C		(1 << 0)
+#	define AVIVO_DC_I2C_SW_CAN_USE_I2C		(1 << 1)
+#	define AVIVO_DC_I2C_SW_DONE_USING_I2C		(1 << 8)
+#	define AVIVO_DC_I2C_HW_NEEDS_I2C		(1 << 9)
+#	define AVIVO_DC_I2C_ABORT_HDCP_I2C		(1 << 16)
+#	define AVIVO_DC_I2C_HW_USING_I2C		(1 << 17)
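+/* Software claims the DDC engine through an arbitration handshake: it
+ * requests the bus, waits for the grant, and releases it when done.  A
+ * minimal sketch (real code would bound the poll with a timeout):
+ *
+ *	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ *	while (!(RREG32(AVIVO_DC_I2C_ARBITRATION) &
+ *		 AVIVO_DC_I2C_SW_CAN_USE_I2C))
+ *		cpu_relax();
+ *	... transfer via AVIVO_DC_I2C_CONTROL1/2/3 and AVIVO_DC_I2C_DATA ...
+ *	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ */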
+
+#define AVIVO_DC_GPIO_DDC1_MASK 		        0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 		                0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 		                0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 		                0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 		        0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 		                0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 		                0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 		                0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 		        0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 		                0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 		                0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 		                0x7e6c
+
+#define AVIVO_DISP_INTERRUPT_STATUS                             0x7edc
+#       define AVIVO_D1_VBLANK_INTERRUPT                        (1 << 4)
+#       define AVIVO_D2_VBLANK_INTERRUPT                        (1 << 5)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 0000000..074cf75
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r520d.h"
+
+/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R520_MC_STATUS);
+		if (tmp & R520_MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r520_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	rv515_vga_render_disable(rdev);
+	/*
+	 * DST_PIPE_CONFIG		0x170C
+	 * GB_TILE_CONFIG		0x4018
+	 * GB_FIFO_SIZE			0x4024
+	 * GB_PIPE_SELECT		0x402C
+	 * GB_PIPE_SELECT2              0x4124
+	 *	Z_PIPE_SHIFT			0
+	 *	Z_PIPE_MASK			0x000000003
+	 * GB_FIFO_SIZE2                0x4128
+	 *	SC_SFIFO_SIZE_SHIFT		0
+	 *	SC_SFIFO_SIZE_MASK		0x000000003
+	 *	SC_MFIFO_SIZE_SHIFT		2
+	 *	SC_MFIFO_SIZE_MASK		0x00000000C
+	 *	FG_SFIFO_SIZE_SHIFT		4
+	 *	FG_SFIFO_SIZE_MASK		0x000000030
+	 *	ZB_MFIFO_SIZE_SHIFT		6
+	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
+	 * GA_ENHANCE			0x4274
+	 * SU_REG_DEST			0x42C8
+	 */
+	/* workaround for RV530 */
+	if (rdev->family == CHIP_RV530) {
+		WREG32(0x4128, 0xFF);
+	}
+	r420_pipes_init(rdev);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r520_mc_wait_for_idle(rdev)) {
+		pr_warn("Timed out waiting for MC idle while programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(R520_MC_CNTL0);
+	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+	case 0:
+		rdev->mc.vram_width = 32;
+		break;
+	case 1:
+		rdev->mc.vram_width = 64;
+		break;
+	case 2:
+		rdev->mc.vram_width = 128;
+		break;
+	case 3:
+		rdev->mc.vram_width = 256;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
+	if (tmp & R520_MC_CHANNEL_SIZE)
+		rdev->mc.vram_width *= 2;
+}
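+
+/* The channel-count field selects a 32/64/128/256-bit interface and
+ * R520_MC_CHANNEL_SIZE doubles it; e.g. a field value of 2 with the size
+ * bit set yields rdev->mc.vram_width = 128 * 2 = 256.
+ */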
+
+static void r520_mc_init(struct radeon_device *rdev)
+{
+	r520_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+static void r520_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (r520_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program MC; the address space should be limited to 32 bits */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000005_MC_AGP_LOCATION,
+			S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000007_AGP_BASE_2,
+			S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000006_AGP_BASE, 0);
+		WREG32_MC(R_000007_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
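+
+/* MC_FB_LOCATION packs the upper 16 bits of the framebuffer start and end
+ * into two halfword fields; e.g. vram_start = 0 and vram_end = 0x0FFFFFFF
+ * (256 MB - 1) program the register to 0x0FFF0000.
+ */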
+
+static int r520_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r520_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r520_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r520_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r520_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disable VGA, need to use VGA request */
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for R520 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r520_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 0000000..61af61f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R520D_H__
+#define __R520D_H__
+
+/* Registers */
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
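+/* The S_/G_/C_ helpers follow the usual radeon register-header pattern:
+ * S_ shifts a value into its field, G_ extracts it, and C_ is the mask
+ * with the field cleared, for read-modify-write updates.  A minimal
+ * sketch (fb_base is a placeholder variable):
+ *
+ *	tmp = RREG32(R_000134_HDP_FB_LOCATION);
+ *	tmp &= C_000134_HDP_FB_START;
+ *	tmp |= S_000134_HDP_FB_START(fb_base >> 16);
+ *	WREG32(R_000134_HDP_FB_LOCATION, tmp);
+ */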
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 0000000..e06e2d8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,4617 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "radeon_mode.h"
+#include "r600d.h"
+#include "atom.h"
+#include "avivod.h"
+#include "radeon_ucode.h"
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV770_smc.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV730_smc.bin");
+MODULE_FIRMWARE("radeon/RV740_smc.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/RV710_smc.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
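+
+/* crtc_offsets[] turns a D1-relative register into its D2 counterpart by
+ * adding the distance between the two CRTC blocks, e.g.:
+ *
+ *	RREG32(AVIVO_D1CRTC_FRAME_COUNT + crtc_offsets[crtc]);
+ */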
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev);
+
+/* r600,rv610,rv630,rv620,rv635,rv670 */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
+
+/*
+ * Indirect registers accessor
+ */
+u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
+	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
+	r = RREG32(R600_RCU_DATA);
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
+	return r;
+}
+
+void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
+	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
+	WREG32(R600_RCU_DATA, (v));
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
+}
+
+u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
+	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
+	r = RREG32(R600_UVD_CTX_DATA);
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
+	return r;
+}
+
+void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
+	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
+	WREG32(R600_UVD_CTX_DATA, (v));
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
+}
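+
+/* Both accessor pairs use the classic index/data window: the target
+ * offset is written to the INDEX port and the payload moves through the
+ * adjoining DATA port, under a spinlock so concurrent callers cannot
+ * interleave their index/data sequences.
+ */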
+
+/**
+ * r600_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+				   u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS2:
+	case R_000E50_SRBM_STATUS:
+	case DMA_STATUS_REG:
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+	return rdev->clock.spll.reference_freq;
+}
+
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
+		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
+			 ~UPLL_BYPASS_CNTL);
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	if (rdev->clock.spll.reference_freq == 10000)
+		ref_div = 34;
+	else
+		ref_div = 4;
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+					  ref_div + 1, 0xFFF, 2, 30, ~0,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
+		fb_div >>= 1;
+	else
+		fb_div |= 1;
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* For RS780 we have to choose ref clk */
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
+			 ~UPLL_REFCLK_SRC_SEL_MASK);
+
+	/* set the required fb, ref and post divider values */
+	WREG32_P(CG_UPLL_FUNC_CNTL,
+		 UPLL_FB_DIV(fb_div) |
+		 UPLL_REF_DIV(ref_div),
+		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_SW_HILEN(vclk_div >> 1) |
+		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+		 UPLL_SW_HILEN2(dclk_div >> 1) |
+		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
+		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
+		 ~UPLL_SW_MASK);
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* deassert BYPASS EN */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
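+
+/* A rough model of the divider chain (an assumption based on the usual
+ * radeon UPLL layout, not taken from this file): the VCO runs at
+ * ref_freq * fb_div / ref_div, and VCLK/DCLK are the VCO output divided
+ * by vclk_div/dclk_div; radeon_uvd_calc_upll_dividers() solves for those
+ * dividers within the VCO limits passed above.
+ */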
+
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= FMT_SPATIAL_DITHER_EN;
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
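+
+/* When the sink is shallower than the pipe, the FMT block either
+ * truncates the extra bits or dithers them away; e.g. a 6 bpc panel
+ * gets FMT_SPATIAL_DITHER_EN when dithering is requested and
+ * FMT_TRUNCATE_EN otherwise, exactly as the switch above programs.
+ */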
+
+/* get temperature in millidegrees */
+int rv6xx_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
+
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
+}
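+
+/* The sensor reports a signed 9-bit value with bit 8 as the sign; e.g. a
+ * raw reading of 0x1F0 decodes to 240 - 256 = -16 degrees C and is
+ * returned as -16000 millidegrees.
+ */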
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	/* power state array is low to high, default is first */
+	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+		int min_power_state_index = 0;
+
+		if (rdev->pm.num_power_states > 2)
+			min_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_power_state_index = min_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.current_power_state_index == min_power_state_index) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_downclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = 0; i < rdev->pm.num_power_states; i++) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i >= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else {
+					if (rdev->pm.current_power_state_index == 0)
+						rdev->pm.requested_power_state_index =
+							rdev->pm.num_power_states - 1;
+					else
+						rdev->pm.requested_power_state_index =
+							rdev->pm.current_power_state_index - 1;
+				}
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_power_state_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_upclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i <= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index + 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	} else {
+		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
+		/* for now just select the first power state and switch between clock modes */
+		/* power state array is low to high, default is first (0) */
+		if (rdev->pm.active_crtc_count > 1) {
+			rdev->pm.requested_power_state_index = -1;
+			/* start at 1 as we don't want the default mode */
+			for (i = 1; i < rdev->pm.num_power_states; i++) {
+				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+					continue;
+				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+					rdev->pm.requested_power_state_index = i;
+					break;
+				}
+			}
+			/* if nothing selected, grab the default state. */
+			if (rdev->pm.requested_power_state_index == -1)
+				rdev->pm.requested_power_state_index = 0;
+		} else
+			rdev->pm.requested_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index == 0) {
+					rdev->pm.requested_clock_mode_index = 0;
+					rdev->pm.dynpm_can_downclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index - 1;
+			} else {
+				rdev->pm.requested_clock_mode_index = 0;
+				rdev->pm.dynpm_can_downclock = false;
+			}
+			/* skip this clock mode if crtcs are active and its NO_DISPLAY flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_clock_mode_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index ==
+				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+					rdev->pm.dynpm_can_upclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index + 1;
+			} else {
+				rdev->pm.requested_clock_mode_index =
+					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+				rdev->pm.dynpm_can_upclock = false;
+			}
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for not defined action\n");
+			return;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states == 2) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else if (rdev->pm.num_power_states == 3) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	}
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	if (rdev->family == CHIP_R600) {
+		/* XXX */
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		if (rdev->pm.num_power_states < 4) {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		} else {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		}
+	}
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+		return false;
+	else
+		return true;
+}
+
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_4:
+			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_5:
+			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_6:
+			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r600_hpd_sense(rdev, hpd);
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			tmp = RREG32(DC_HPD4_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD4_INT_CONTROL, tmp);
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_5:
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_6:
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS to avoid
+			 * breaking the aux dp channel on iMacs; this helps
+			 * (but does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 */
+			continue;
+		}
+		if (ASIC_IS_DCE3(rdev)) {
+			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+			if (ASIC_IS_DCE32(rdev))
+				tmp |= DC_HPDx_EN;
+
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, tmp);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, tmp);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, tmp);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, tmp);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, tmp);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, tmp);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			default:
+				break;
+			}
+		}
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (ASIC_IS_DCE3(rdev)) {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, 0);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, 0);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, 0);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		}
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * R600 PCIE GART
+ */
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	/* flush hdp cache so updates hit vram */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    !(rdev->flags & RADEON_IS_AGP)) {
+		void __iomem *ptr = (void *)rdev->gart.ptr;
+		u32 tmp;
+
+		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
+		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
+		 * This seems to cause problems on some AGP cards, so just
+		 * use the old method for them.
+		 */
+		WREG32(HDP_DEBUG1, 0);
+		tmp = readl((void __iomem *)ptr);
+	} else
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
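+	/* poll the invalidation response: any non-zero response type means
+	 * the request completed; a value of 2 indicates the flush failed */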
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			pr_warn("[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
+int r600_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "R600 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Disable L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup L1 TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r600_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+static void r600_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
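+		/* 0x3F00 appears to cover the MC busy bits (bits 8-13) */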
+		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
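+/* RS780/RS880 MC registers are accessed indirectly through an index/data
+ * register pair; the spinlock keeps the two-step sequence atomic.
+ */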
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0028FC_MC_DATA);
+	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+		S_0028F8_MC_IND_WR_EN(1));
+	WREG32(R_0028FC_MC_DATA, v);
+	WREG32(R_0028F8_MC_INDEX, 0x7F);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
+
+static void r600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+	/* Lockout access through VGA aperture (doesn't exist before R600) */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it from overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Try to place VRAM at the same address it has in the CPU (PCI)
+ * address space, as some GPUs seem to have issues when we reprogram
+ * it at a different address.
+ *
+ * If there is not enough space to fit the invisible VRAM after the
+ * aperture then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
+ * need them to be contiguous from the GPU's point of view so that we can
+ * program the GPU to catch accesses outside them (weird GPU policy, see ??).
+ *
+ * This function never fails; in the worst case it limits VRAM or GTT.
+ *
+ * Note: GTT start, end and size should be initialized before calling this
+ * function on AGP platforms.
+ */
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = mc->mc_mask - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		u64 base = 0;
+		if (rdev->flags & RADEON_IS_IGP) {
+			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+			base <<= 24;
+		}
+		radeon_vram_location(rdev, &rdev->mc, base);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
+static int r600_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+	uint32_t h_addr, l_addr;
+	unsigned long long k8_addr;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
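+	/* effective memory bus width = number of channels * channel size */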
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r600_vram_gtt_location(rdev, &rdev->mc);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rs690_pm_info(rdev);
+		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+			/* Use K8 direct mapping for fast fb access. */
+			rdev->fastfb_working = false;
+			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
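+			/* on 32-bit non-PAE kernels, only use the direct
+			 * mapping when it fits below 4GB */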
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+			{
+				/* FastFB shall be used with UMA memory. Here it
+				 * is simply disabled when sideport memory is
+				 * present.
+				 */
+				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+						(unsigned long long)rdev->mc.aper_base, k8_addr);
+					rdev->mc.aper_base = (resource_size_t)k8_addr;
+					rdev->fastfb_working = true;
+				}
+			}
+		}
+	}
+
+	radeon_update_bandwidth_info(rdev);
+	return 0;
+}
+
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     0, NULL, NULL, &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+				(void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
+{
+	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
+		 RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
+		 RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
+		 RREG32(R_000E50_SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		 RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		 RREG32(CP_STAT));
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+	u32 crtc_hung = 0;
+	u32 crtc_status[2];
+	u32 i, j, tmp;
+
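+	/* snapshot the HV counter of each enabled crtc; if a counter never
+	 * changes over ~1ms of sampling, consider that display hung */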
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+			crtc_hung |= (1 << i);
+		}
+	}
+
+	for (j = 0; j < 10; j++) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (crtc_hung & (1 << i)) {
+				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+				if (tmp != crtc_status[i])
+					crtc_hung &= ~(1 << i);
+			}
+		}
+		if (crtc_hung == 0)
+			return false;
+		udelay(100);
+	}
+
+	return true;
+}
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
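+	/* translate the various busy/pending status bits into a mask of
+	 * blocks that would need a soft reset */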
+	/* GRBM_STATUS */
+	tmp = RREG32(R_008010_GRBM_STATUS);
+	if (rdev->family >= CHIP_RV770) {
+		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+			reset_mask |= RADEON_RESET_GFX;
+	} else {
+		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+			reset_mask |= RADEON_RESET_GFX;
+	}
+
+	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (G_008010_GRBM_EE_BUSY(tmp))
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG */
+	tmp = RREG32(DMA_STATUS_REG);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(R_000E50_SRBM_STATUS);
+	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (G_000E50_IH_BUSY(tmp))
+		reset_mask |= RADEON_RESET_IH;
+
+	if (G_000E50_SEM_BUSY(tmp))
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (G_000E50_GRBM_RQ_PENDING(tmp))
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (G_000E50_VMC_BUSY(tmp))
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+	    G_000E50_MCDW_BUSY(tmp))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (r600_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct rv515_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	r600_print_gpu_status_regs(rdev);
+
+	/* Disable CP parsing/prefetching */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+	else
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* disable the RLC */
+	WREG32(RLC_CNTL, 0);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* Disable DMA */
+		tmp = RREG32(DMA_RB_CNTL);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL, tmp);
+	}
+
+	mdelay(50);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		if (rdev->family >= CHIP_RV770)
+			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+				S_008020_SOFT_RESET_CB(1) |
+				S_008020_SOFT_RESET_PA(1) |
+				S_008020_SOFT_RESET_SC(1) |
+				S_008020_SOFT_RESET_SPI(1) |
+				S_008020_SOFT_RESET_SX(1) |
+				S_008020_SOFT_RESET_SH(1) |
+				S_008020_SOFT_RESET_TC(1) |
+				S_008020_SOFT_RESET_TA(1) |
+				S_008020_SOFT_RESET_VC(1) |
+				S_008020_SOFT_RESET_VGT(1);
+		else
+			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+				S_008020_SOFT_RESET_DB(1) |
+				S_008020_SOFT_RESET_CB(1) |
+				S_008020_SOFT_RESET_PA(1) |
+				S_008020_SOFT_RESET_SC(1) |
+				S_008020_SOFT_RESET_SMX(1) |
+				S_008020_SOFT_RESET_SPI(1) |
+				S_008020_SOFT_RESET_SX(1) |
+				S_008020_SOFT_RESET_SH(1) |
+				S_008020_SOFT_RESET_TC(1) |
+				S_008020_SOFT_RESET_TA(1) |
+				S_008020_SOFT_RESET_VC(1) |
+				S_008020_SOFT_RESET_VGT(1);
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+			S_008020_SOFT_RESET_VGT(1);
+
+		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+	}
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		if (rdev->family >= CHIP_RV770)
+			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+		else
+			srbm_soft_reset |= SOFT_RESET_DMA;
+	}
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+	}
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
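+	/* pulse each reset: set the bits, hold them for ~50us, then clear */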
+	if (grbm_soft_reset) {
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	mdelay(1);
+
+	rv515_mc_resume(rdev, &save);
+	udelay(50);
+
+	r600_print_gpu_status_regs(rdev);
+}
+
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+	else
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* disable the RLC */
+	WREG32(RLC_CNTL, 0);
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	mdelay(50);
+
+	/* set mclk/sclk to bypass */
+	if (rdev->family >= CHIP_RV770)
+		rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = RREG32(BUS_CNTL);
+	tmp |= VGA_COHE_SPEC_TIMER_DIS;
+	WREG32(BUS_CNTL, tmp);
+
+	tmp = RREG32(BIF_SCRATCH0);
+
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	mdelay(1);
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = SOFT_RESET_BIF;
+	WREG32(SRBM_SOFT_RESET, tmp);
+	mdelay(1);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
+int r600_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	u32 reset_mask;
+
+	if (hard) {
+		r600_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
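+	/* soft-reset only the blocks that look hung first, then fall back
+	 * to a pci config reset if allowed and still needed */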
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	/* try soft reset */
+	r600_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		r600_gpu_pci_config_reset(rdev);
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+			      u32 tiling_pipe_num,
+			      u32 max_rb_num,
+			      u32 total_max_rb_num,
+			      u32 disabled_rb_mask)
+{
+	u32 rendering_pipe_num, rb_num_width, req_rb_num;
+	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
+	u32 data = 0, mask = 1 << (max_rb_num - 1);
+	unsigned i, j;
+
+	/* mask out the RBs that don't exist on that asic */
+	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+	/* make sure at least one RB is available */
+	if ((tmp & 0xff) != 0xff)
+		disabled_rb_mask = tmp;
+
+	rendering_pipe_num = 1 << tiling_pipe_num;
+	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+	BUG_ON(rendering_pipe_num < req_rb_num);
+
+	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+	if (rdev->family <= CHIP_RV740) {
+		/* r6xx/r7xx */
+		rb_num_width = 2;
+	} else {
+		/* eg+ */
+		rb_num_width = 4;
+	}
+
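+	/* distribute the render backends over the pipes round-robin: each
+	 * usable RB gets pipe_rb_ratio slots, leftovers go one per RB */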
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(mask & disabled_rb_mask)) {
+			for (j = 0; j < pipe_rb_ratio; j++) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+			}
+			if (pipe_rb_remain) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+				pipe_rb_remain--;
+			}
+		}
+		mask >>= 1;
+	}
+
+	return data;
+}
+
+int r600_count_pipe_bits(uint32_t val)
+{
+	return hweight32(val);
+}
+
+static void r600_gpu_init(struct radeon_device *rdev)
+{
+	u32 tiling_config;
+	u32 ramcfg;
+	u32 cc_gc_shader_pipe_config;
+	u32 tmp;
+	int i, j;
+	u32 sq_config;
+	u32 sq_gpr_resource_mgmt_1 = 0;
+	u32 sq_gpr_resource_mgmt_2 = 0;
+	u32 sq_thread_resource_mgmt = 0;
+	u32 sq_stack_resource_mgmt_1 = 0;
+	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 disabled_rb_mask;
+
+	rdev->config.r600.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_R600:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 8;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 256;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		rdev->config.r600.max_pipes = 2;
+		rdev->config.r600.max_tile_pipes = 2;
+		rdev->config.r600.max_simds = 3;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->config.r600.max_pipes = 1;
+		rdev->config.r600.max_tile_pipes = 1;
+		rdev->config.r600.max_simds = 2;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 4;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 1;
+		break;
+	case CHIP_RV670:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 4;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 192;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* Setup tiling */
+	tiling_config = 0;
+	ramcfg = RREG32(RAMCFG);
+	switch (rdev->config.r600.max_tile_pipes) {
+	case 1:
+		tiling_config |= PIPE_TILING(0);
+		break;
+	case 2:
+		tiling_config |= PIPE_TILING(1);
+		break;
+	case 4:
+		tiling_config |= PIPE_TILING(2);
+		break;
+	case 8:
+		tiling_config |= PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+
+	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
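+	/* ROW_TILING and SAMPLE_SPLIT max out at 3, so clamp larger values */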
+	if (tmp > 3) {
+		tiling_config |= ROW_TILING(3);
+		tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		tiling_config |= ROW_TILING(tmp);
+		tiling_config |= SAMPLE_SPLIT(tmp);
+	}
+	tiling_config |= BANK_SWAPS(1);
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+	tmp = rdev->config.r600.max_simds -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+	rdev->config.r600.active_simds = tmp;
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.r600.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.r600.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
+	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+					R6XX_MAX_BACKENDS, disabled_rb_mask);
+	tiling_config |= tmp << 16;
+	rdev->config.r600.backend_map = tmp;
+
+	rdev->config.r600.tile_config = tiling_config;
+	WREG32(GB_TILING_CONFIG, tiling_config);
+	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
+
+	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* Setup some CP states */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
+			     SYNC_WALKER | SYNC_ALIGNER));
+	/* Setup various GPU states */
+	if (rdev->family == CHIP_RV670)
+		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
+
+	tmp = RREG32(SX_DEBUG_1);
+	tmp |= SMX_EVENT_RELEASE;
+	if (rdev->family > CHIP_R600)
+		tmp |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, tmp);
+
+	if (((rdev->family) == CHIP_R600) ||
+	    ((rdev->family) == CHIP_RV630) ||
+	    ((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+	} else {
+		WREG32(DB_DEBUG, 0);
+	}
+	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
+			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(VGT_NUM_INSTANCES, 0);
+
+	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
+
+	tmp = RREG32(SQ_MS_FIFO_SIZES);
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		tmp = (CACHE_FIFO_SIZE(0xa) |
+		       FETCH_FIFO_HIWATER(0xa) |
+		       DONE_FIFO_HIWATER(0xe0) |
+		       ALU_UPDATE_FIFO_HIWATER(0x8));
+	} else if (((rdev->family) == CHIP_R600) ||
+		   ((rdev->family) == CHIP_RV630)) {
+		tmp &= ~DONE_FIFO_HIWATER(0xff);
+		tmp |= DONE_FIFO_HIWATER(0x4);
+	}
+	WREG32(SQ_MS_FIFO_SIZES, tmp);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	if ((rdev->family) == CHIP_R600) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
+					  NUM_VS_GPRS(124) |
+					  NUM_CLAUSE_TEMP_GPRS(4));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
+					  NUM_ES_GPRS(0));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
+					   NUM_VS_THREADS(48) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(4));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
+					    NUM_VS_STACK_ENTRIES(128));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
+					    NUM_ES_STACK_ENTRIES(0));
+	} else if (((rdev->family) == CHIP_RV610) ||
+		   ((rdev->family) == CHIP_RV620) ||
+		   ((rdev->family) == CHIP_RS780) ||
+		   ((rdev->family) == CHIP_RS880)) {
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if (((rdev->family) == CHIP_RV630) ||
+		   ((rdev->family) == CHIP_RV635)) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
+					  NUM_ES_GPRS(18));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if ((rdev->family) == CHIP_RV670) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
+					    NUM_VS_STACK_ENTRIES(64));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
+					    NUM_ES_STACK_ENTRIES(64));
+	}
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
+	} else {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
+	}
+
+	/* More default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
+					 S1_X(0x4) | S1_Y(0xc)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
+					 S1_X(0x2) | S1_Y(0x2) |
+					 S2_X(0xa) | S2_Y(0x6) |
+					 S3_X(0x6) | S3_Y(0xa)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
+					     S1_X(0x4) | S1_Y(0xc) |
+					     S2_X(0x1) | S2_Y(0x6) |
+					     S3_X(0xa) | S3_Y(0xe)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
+					     S5_X(0x0) | S5_Y(0x0) |
+					     S6_X(0xb) | S6_Y(0x4) |
+					     S7_X(0x7) | S7_Y(0x8)));
+
+	WREG32(VGT_STRMOUT_EN, 0);
+	tmp = rdev->config.r600.max_pipes * 16;
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp += 32;
+		break;
+	case CHIP_RV670:
+		tmp += 128;
+		break;
+	default:
+		break;
+	}
+	if (tmp > 256)
+		tmp = 256;
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, tmp);
+	WREG32(VGT_GS_PER_VS, 2);
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* Clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp = TC_L2_SIZE(8);
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		tmp = TC_L2_SIZE(4);
+		break;
+	case CHIP_R600:
+		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
+		break;
+	default:
+		tmp = TC_L2_SIZE(0);
+		break;
+	}
+	WREG32(TC_CNTL, tmp);
+
+	tmp = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, tmp);
+
+	tmp = RREG32(ARB_POP);
+	tmp |= ENABLE_TC128;
+	WREG32(ARB_POP, tmp);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+			       NUM_CLIP_SEQ(3)));
+	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+	WREG32(VC_ENHANCE, 0);
+}
+
+/*
+ * Indirect registers accessor
+ */
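+/*
+ * The PCIE port registers are reached through an index/data pair: the
+ * register offset is written to PCIE_PORT_INDEX, read back once to post
+ * the write, and the payload then goes through PCIE_PORT_DATA.  The
+ * spinlock keeps the index/data sequence atomic against other users.
+ */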
+u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
+	return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
+}
+
+/*
+ * CP & Ring
+ */
+void r600_cp_stop(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+int r600_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *rlc_chip_name;
+	const char *smc_chip_name = "RV770";
+	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_R600:
+		chip_name = "R600";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV610:
+		chip_name = "RV610";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV630:
+		chip_name = "RV630";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV620:
+		chip_name = "RV620";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV635:
+		chip_name = "RV635";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV670:
+		chip_name = "RV670";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		chip_name = "RS780";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV770:
+		chip_name = "RV770";
+		rlc_chip_name = "R700";
+		smc_chip_name = "RV770";
+		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_RV730:
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		smc_chip_name = "RV730";
+		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_RV710:
+		chip_name = "RV710";
+		rlc_chip_name = "R700";
+		smc_chip_name = "RV710";
+		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_RV740:
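+		/* RV740 reuses the RV730 CP/PFP microcode */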
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		smc_chip_name = "RV740";
+		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_CEDAR:
+		chip_name = "CEDAR";
+		rlc_chip_name = "CEDAR";
+		smc_chip_name = "CEDAR";
+		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_REDWOOD:
+		chip_name = "REDWOOD";
+		rlc_chip_name = "REDWOOD";
+		smc_chip_name = "REDWOOD";
+		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_JUNIPER:
+		chip_name = "JUNIPER";
+		rlc_chip_name = "JUNIPER";
+		smc_chip_name = "JUNIPER";
+		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_name = "CYPRESS";
+		rlc_chip_name = "CYPRESS";
+		smc_chip_name = "CYPRESS";
+		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO:
+		chip_name = "SUMO";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO2:
+		chip_name = "SUMO2";
+		rlc_chip_name = "SUMO";
+		break;
+	default: BUG();
+	}
+
+	if (rdev->family >= CHIP_CEDAR) {
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	} else if (rdev->family >= CHIP_RV770) {
+		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+		me_req_size = R700_PM4_UCODE_SIZE * 4;
+		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
+	} else {
+		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
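+		/* the r6xx ME image is R600_PM4_UCODE_SIZE * 3 dwords
+		 * (hence * 12 bytes here); see the matching dword loop
+		 * in r600_cp_load_microcode()
+		 */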
+		me_req_size = R600_PM4_UCODE_SIZE * 12;
+		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+		if (err) {
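+			/* SMC (DPM) firmware is optional; keep going without it */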
+			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
+			release_firmware(rdev->smc_fw);
+			rdev->smc_fw = NULL;
+			err = 0;
+		} else if (rdev->smc_fw->size != smc_req_size) {
+			pr_err("smc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->smc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	}
+
+out:
+	if (err) {
+		if (err != -EINVAL)
+			pr_err("r600_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->smc_fw);
+		rdev->smc_fw = NULL;
+	}
+	return err;
+}
+
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
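+	/* prefer the writeback copy of the rptr and save an MMIO read */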
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(R600_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	return RREG32(R600_CP_RB_WPTR);
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(R600_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(R600_CP_RB_WPTR);
+}
+
+static int r600_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r600_cp_stop(rdev);
+
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
+		WREG32(CP_ME_RAM_DATA,
+		       be32_to_cpup(fw_data++));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA,
+		       be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
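+/**
+ * r600_cp_start - start the command processor
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Emits the PACKET3_ME_INITIALIZE sequence on the gfx ring and then
+ * writes CP_ME_CNTL with the halt bit cleared so the micro engine
+ * starts fetching.  Returns 0 on success, negative error code on failure.
+ */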
+int r600_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	if (rdev->family >= CHIP_RV770) {
+		radeon_ring_write(ring, 0x0);
+		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+	} else {
+		radeon_ring_write(ring, 0x3);
+		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+	}
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	cp_me = 0xff;
+	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
+	return 0;
+}
+
+int r600_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	/* Set ring buffer size */
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	r600_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+{
+	u32 rb_bufsz;
+	int r;
+
+	/* Align ring size */
+	rb_bufsz = order_base_2(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
+	ring->ring_size = ring_size;
+	ring->align_mask = 16 - 1;
+
+	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+}
+
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+void r600_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
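+/**
+ * r600_ring_test - simple gfx ring sanity check
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ *
+ * Writes 0xCAFEDEAD to a scratch register, emits a packet that stores
+ * 0xDEADBEEF in the same register, then polls until the new value
+ * shows up, proving the CP fetched and executed the packet.
+ * Returns 0 on success, -EINVAL on timeout.
+ */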
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
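+/**
+ * r600_fence_ring_emit - emit a fence on the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Flushes the read caches over GART and then writes the fence sequence
+ * number, either with an EVENT_WRITE_EOP packet when writeback events
+ * are available or with a scratch register write followed by a manually
+ * triggered CP interrupt.
+ */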
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
+
+	if (rdev->wb.use_event) {
+		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, cp_coher_cntl);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		/* EVENT_WRITE_EOP - flush caches, send int */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+		radeon_ring_write(ring, lower_32_bits(addr));
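+		/* DATA_SEL(1) = write the 32bit fence value, INT_SEL(2) =
+		 * raise the interrupt once the write has landed
+		 */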
+		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+		radeon_ring_write(ring, fence->seq);
+		radeon_ring_write(ring, 0);
+	} else {
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, cp_coher_cntl);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+		/* wait for 3D idle clean */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+		/* Emit fence sequence & fire IRQ */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, fence->seq);
+		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+		radeon_ring_write(ring, RB_INT_STAT);
+	}
+}
+
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	if (rdev->family < CHIP_CAYMAN)
+		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
+		/* Prevent the PFP from running ahead of the semaphore wait */
+		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		radeon_ring_write(ring, 0x0);
+	}
+
+	return true;
+}
+
+/**
+ * r600_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the CP DMA engine (r6xx+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+				     uint64_t src_offset, uint64_t dst_offset,
+				     unsigned num_gpu_pages,
+				     struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.blit_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes, tmp;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
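+	/* each CP DMA packet can move at most 0x1fffff bytes */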
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0x1fffff)
+			cur_size_in_bytes = 0x1fffff;
+		size_in_bytes -= cur_size_in_bytes;
+		tmp = upper_32_bits(src_offset) & 0xff;
+		if (size_in_bytes == 0)
+			tmp |= PACKET3_CP_DMA_CP_SYNC;
+		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, tmp);
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, cur_size_in_bytes);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
+
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	/* FIXME: implement */
+	return 0;
+}
+
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	/* FIXME: implement */
+}
+
+static void r600_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v1_0_resume() fail early, so nothing happens there.
+		 * It is therefore pointless to go through that code, hence
+		 * why we disable UVD here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void r600_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void r600_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static int r600_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	r600_pcie_gen2_enable(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	r600_mc_program(rdev);
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		r600_agp_enable(rdev);
+	} else {
+		r = r600_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r600_gpu_init(rdev);
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r600_uvd_start(rdev);
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	r = r600_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r600_uvd_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+void r600_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(CONFIG_CNTL);
+	if (!state) {
+		temp &= ~(1<<0);
+		temp |= (1<<1);
+	} else {
+		temp &= ~(1<<1);
+	}
+	WREG32(CONFIG_CNTL, temp);
+}
+
+int r600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
+	 * posting performs the tasks needed to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		DRM_ERROR("r600 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int r600_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	r600_cp_stop(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	r600_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and to use
+ * helper functions so that radeon_device_init does little more than
+ * call the ASIC-specific functions. This should also allow us to
+ * remove a bunch of callbacks like vram_info.
+ */
+int r600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (r600_debugfs_mc_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for mc !\n");
+	}
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = r600_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	r600_uvd_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		r600_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	return 0;
+}
+
+void r600_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_fini(rdev);
+	}
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	r600_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/*
+ * CS stuff
+ */
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
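+	/* Record where the rptr will point once this IB has been consumed,
+	 * either in the rptr-save scratch register or in the writeback
+	 * page, so the ring position can be recovered after a lockup.
+	 */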
+	if (ring->rptr_save_reg) {
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
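+/**
+ * r600_ib_test - simple indirect buffer sanity check
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ *
+ * Same idea as r600_ring_test(), but the scratch register write is
+ * placed in an indirect buffer so the IB fetch path is exercised too.
+ * Returns 0 on success, negative error code on failure.
+ */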
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto free_ib;
+	}
+	r = 0;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
+ * like the CP ring buffer, but in reverse.  Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and the host consumes.  As the host irq handler processes interrupts, it
+ * increments the rptr.  When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
+
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	u32 rb_bufsz;
+
+	/* Align ring size */
+	rb_bufsz = order_base_2(ring_size / 4);
+	ring_size = (1 << rb_bufsz) * 4;
+	rdev->ih.ring_size = ring_size;
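+	/* the size is a power of two, so the read/write pointers wrap
+	 * with a simple mask
+	 */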
+	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+	rdev->ih.rptr = 0;
+}
+
+int r600_ih_ring_alloc(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Allocate ring buffer */
+	if (rdev->ih.ring_obj == NULL) {
+		r = radeon_bo_create(rdev, rdev->ih.ring_size,
+				     PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, 0,
+				     NULL, NULL, &rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
+				   (void **)&rdev->ih.ring);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+			return r;
+		}
+	}
+	return 0;
+}
+
+void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+	int r;
+	if (rdev->ih.ring_obj) {
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
+		rdev->ih.ring = NULL;
+		rdev->ih.ring_obj = NULL;
+	}
+}
+
+void r600_rlc_stop(struct radeon_device *rdev)
+{
+	if ((rdev->family >= CHIP_RV770) &&
+	    (rdev->family <= CHIP_RV740)) {
+		/* r7xx asics need to soft reset RLC before halting */
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+		RREG32(SRBM_SOFT_RESET);
+		mdelay(15);
+		WREG32(SRBM_SOFT_RESET, 0);
+		RREG32(SRBM_SOFT_RESET);
+	}
+
+	WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	r600_rlc_stop(rdev);
+
+	WREG32(RLC_HB_CNTL, 0);
+
+	WREG32(RLC_HB_BASE, 0);
+	WREG32(RLC_HB_RPTR, 0);
+	WREG32(RLC_HB_WPTR, 0);
+	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
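+	/* stream the RLC image in one dword at a time through the
+	 * UCODE_ADDR/UCODE_DATA index/data pair
+	 */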
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	if (rdev->family >= CHIP_RV770) {
+		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else {
+		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	r600_rlc_start(rdev);
+
+	return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+void r600_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		if (ASIC_IS_DCE32(rdev)) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		} else {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else {
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+	}
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	r600_disable_interrupts(rdev);
+
+	/* init rlc */
+	if (rdev->family >= CHIP_CEDAR)
+		ret = evergreen_rlc_resume(rdev);
+	else
+		ret = r600_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
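+	/* the ring size field is log2 of the IH ring size in dwords */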
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	if (rdev->family >= CHIP_CEDAR)
+		evergreen_disable_interrupt_state(rdev);
+	else
+		r600_disable_interrupt_state(rdev);
+
+	/* at this point everything should be setup correctly to enable master */
+	pci_set_master(rdev->pdev);
+
+	/* enable irqs */
+	r600_enable_interrupts(rdev);
+
+	return ret;
+}
+
+void r600_irq_suspend(struct radeon_device *rdev)
+{
+	r600_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 mode_int = 0;
+	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 hdmi0, hdmi1;
+	u32 dma_cntl;
+	u32 thermal_int = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		r600_disable_interrupt_state(rdev);
+		return 0;
+	}
+
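+	/* start from the live register values with the enable bits masked
+	 * off, then set only the sources that were actually requested
+	 */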
+	if (ASIC_IS_DCE3(rdev)) {
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		if (ASIC_IS_DCE32(rdev)) {
+			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+		} else {
+			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		}
+	} else {
+		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+		thermal_int = RREG32(CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	} else if (rdev->family >= CHIP_RV770) {
+		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	}
+	if (rdev->irq.dpm_thermal) {
+		DRM_DEBUG("dpm thermal\n");
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("r600_irq_set: vblank 0\n");
+		mode_int |= D1MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("r600_irq_set: vblank 1\n");
+		mode_int |= D2MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("r600_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("r600_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("r600_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("r600_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("r600_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("r600_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("r600_irq_set: hdmi 0\n");
+		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("r600_irq_set: hdmi 0\n");
+		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DMA_CNTL, dma_cntl);
+	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DC_HPD1_INT_CONTROL, hpd1);
+		WREG32(DC_HPD2_INT_CONTROL, hpd2);
+		WREG32(DC_HPD3_INT_CONTROL, hpd3);
+		WREG32(DC_HPD4_INT_CONTROL, hpd4);
+		if (ASIC_IS_DCE32(rdev)) {
+			WREG32(DC_HPD5_INT_CONTROL, hpd5);
+			WREG32(DC_HPD6_INT_CONTROL, hpd6);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+		} else {
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+		}
+	} else {
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+	}
+	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+		WREG32(CG_THERMAL_INT, thermal_int);
+	} else if (rdev->family >= CHIP_RV770) {
+		WREG32(RV770_CG_THERMAL_INT, thermal_int);
+	}
+
+	/* posting read */
+	RREG32(R_000E50_SRBM_STATUS);
+
+	return 0;
+}
+
+static void r600_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		if (ASIC_IS_DCE32(rdev)) {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+		} else {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+		}
+	} else {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
+	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
+		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
+		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (ASIC_IS_DCE32(rdev)) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		}
+	} else {
+		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+			if (ASIC_IS_DCE3(rdev)) {
+				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			} else {
+				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			}
+		}
+	}
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	r600_irq_ack(rdev);
+	r600_disable_interrupt_state(rdev);
+}
+
+static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
+		/* When a ring buffer overflow happens, start parsing the
+		 * interrupts from the last vector that was not overwritten
+		 * (wptr + 16). Hopefully this should allow us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/*        r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [127:60]  - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id  src_data  description
+ *      1         0  D1 Vblank
+ *      1         1  D1 Vline
+ *      5         0  D2 Vblank
+ *      5         1  D2 Vline
+ *     19         0  FP Hot plug detection A
+ *     19         1  FP Hot plug detection B
+ *     19         2  DAC A auto-detection
+ *     19         3  DAC B auto-detection
+ *     21         4  HDMI block A
+ *     21         5  HDMI block B
+ *    176         -  CP_INT RB
+ *    177         -  CP_INT IB1
+ *    178         -  CP_INT IB2
+ *    181         -  EOP Interrupt
+ *    233         -  GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
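+
+/* For illustration (hypothetical entry, not a real hardware dump): an
+ * entry whose four dwords are { 0x00000001, 0x00000000, 0, 0 } decodes
+ * to src_id = 1, src_data = 0, i.e. a D1 vblank event; the loop below
+ * recovers these by masking dword 0 with 0xff and dword 1 with
+ * 0xfffffff.
+ */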
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+	bool queue_thermal = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	/* No MSIs, need a dummy read to flush PCI DMAs */
+	if (!rdev->msi_enabled)
+		RREG32(IH_RB_WPTR);
+
+	wptr = r600_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	r600_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
+				break;
+			case 1: /* D1 vline */
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+				    DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
+				break;
+			case 1: /* D2 vline */
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 9: /* D1 pflip */
+			DRM_DEBUG("IH: D1 flip\n");
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, 0);
+			break;
+		case 11: /* D2 pflip */
+			DRM_DEBUG("IH: D2 flip\n");
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, 1);
+			break;
+		case 19: /* HPD/DAC hotplug */
+			switch (src_data) {
+			case 0:
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
+				break;
+			case 1:
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
+				break;
+			case 4:
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
+				break;
+			case 5:
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
+				break;
+			case 10:
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
+				break;
+			case 12:
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 21: /* hdmi */
+			switch (src_data) {
+			case 4:
+				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
+
+				break;
+			case 5:
+				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
+
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
+	}
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
+	if (queue_thermal && rdev->pm.dpm_enabled)
+		schedule_work(&rdev->pm.dpm.thermal.work);
+	rdev->ih.rptr = rptr;
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = r600_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
+	DREG32_SYS(m, rdev, VM_L2_STATUS);
+	return 0;
+}
+
+static struct drm_info_list r600_mc_info_list[] = {
+	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
+#else
+	return 0;
+#endif
+}
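+
+/* With CONFIG_DEBUG_FS enabled, the register dump above is typically
+ * exposed as an "r600_mc_info" file under the DRM debugfs directory,
+ * usually /sys/kernel/debug/dri/<minor>/ (the exact path depends on
+ * where debugfs is mounted). */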
+
+/**
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
+ * @rdev: radeon device structure
+ *
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
+ */
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
+{
+	/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
+	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
+	 * The HDP_DEBUG1 method seems to cause problems on some AGP
+	 * cards, so just use the old method for them.
+	 */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
+		void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
+		u32 tmp;
+
+		WREG32(HDP_DEBUG1, 0);
+		tmp = readl((void __iomem *)ptr);
+	} else
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	u32 link_width_cntl, mask;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	radeon_gui_idle(rdev);
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		/* not actually supported */
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	default:
+		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+		return;
+	}
+
+	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return 0;
+
+	radeon_gui_idle(rdev);
+
+	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X12:
+		/* not actually supported */
+		return 12;
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+	u16 link_cntl2;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* only RV6xx+ chips are supported */
+	if (rdev->family <= CHIP_R600)
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* 55 nm r6xx asics */
+	if ((rdev->family == CHIP_RV670) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RV635)) {
+		/* advertise upconfig capability */
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+					     LC_RECONFIG_ARC_MISSING_ESCAPE);
+			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		} else {
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		}
+	}
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		/* 55 nm r6xx asics */
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			WREG32(MM_CFGREGS_CNTL, 0x8);
+			link_cntl2 = RREG32(0x4088);
+			WREG32(MM_CFGREGS_CNTL, 0);
+			/* not supported yet */
+			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+				return;
+		}
+
+		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
+			training_cntl &= ~LC_POINT_7_PLUS_EN;
+			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
+		} else {
+			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+		}
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
+
+/**
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
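+	/* Writing 1 here latches the free-running GPU clock counter into
+	 * the LSB/MSB registers so the two 32-bit reads below come from a
+	 * single consistent 64-bit snapshot (our reading of the capture
+	 * register's purpose); the mutex serializes concurrent captures. */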
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
new file mode 100644
index 0000000..443cbe5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * R6xx+ cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
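+
+/* A rough key for reading the tables below (our own annotation based on
+ * the r600 PM4 type-3 packet format, not part of the generated state):
+ * a header like 0xc0016900 carries the opcode in bits [15:8] and the
+ * body dword count minus one in bits [29:16].  0x69 is SET_CONTEXT_REG,
+ * whose first body dword is an offset in dwords from the context
+ * register base 0x28000, so the pair (0x00000200, 0x00000000) programs
+ * 0x28000 + 0x200 * 4 = 0x28800, i.e. DB_DEPTH_CONTROL = 0.  0x68
+ * (SET_CONFIG_REG) works the same way from base 0x8000.
+ */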
+
+const u32 r6xx_default_state[] =
+{
+	0xc0002400, /* START_3D_CMDBUF */
+	0x00000000,
+
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000003, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x82000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x01020204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000040, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc00f6900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* CB_FOG_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c,
+	0x01000000, /* CB_CLRCMP_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000048,
+	0x3f800000, /* CB_CLEAR_RED */
+	0x00000000,
+	0x3f800000,
+	0x3f800000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00004010, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000000, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+const u32 r7xx_default_state[] =
+{
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000002, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x00000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x00420204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0096900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0036900,
+	0x0000010c,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c, /* CB_CLRCMP_CNTL */
+	0x01000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00514000, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000001, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+/* same for r6xx/r7xx */
+const u32 r6xx_vs[] =
+{
+	0x00000004,
+	0x81000000,
+	0x0000203c,
+	0x94000b08,
+	0x00004000,
+	0x14200b1a,
+	0x00000000,
+	0x00000000,
+	0x3c000000,
+	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
+	0x00080000,
+#endif
+	0x00000000,
+};
+
+const u32 r6xx_ps[] =
+{
+	0x00000002,
+	0x80800000,
+	0x00000000,
+	0x94200688,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
+const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
+const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
+const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
new file mode 100644
index 0000000..f437d36
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef R600_BLIT_SHADERS_H
+#define R600_BLIT_SHADERS_H
+
+extern const u32 r6xx_ps[];
+extern const u32 r6xx_vs[];
+extern const u32 r7xx_default_state[];
+extern const u32 r6xx_default_state[];
+
+
+extern const u32 r6xx_ps_size, r6xx_vs_size;
+extern const u32 r6xx_default_size, r7xx_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
new file mode 100644
index 0000000..c96b319
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -0,0 +1,2535 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "r600_reg_safe.h"
+
+static int r600_nomm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
+
+struct r600_cs_track {
+	/* configuration we mirror so that we use the same code for kms/ums */
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	/* value we track */
+	u32			sq_config;
+	u32			log_nsamples;
+	u32			nsamples;
+	u32			cb_color_base_last[8];
+	struct radeon_bo	*cb_color_bo[8];
+	u64			cb_color_bo_mc[8];
+	u64			cb_color_bo_offset[8];
+	struct radeon_bo	*cb_color_frag_bo[8];
+	u64			cb_color_frag_offset[8];
+	struct radeon_bo	*cb_color_tile_bo[8];
+	u64			cb_color_tile_offset[8];
+	u32			cb_color_mask[8];
+	u32			cb_color_info[8];
+	u32			cb_color_view[8];
+	u32			cb_color_size_idx[8]; /* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask;  /* unused */
+	bool			is_resolve;
+	u32			cb_color_size[8];
+	u32			vgt_strmout_en;
+	u32			vgt_strmout_buffer_en;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u64			vgt_strmout_bo_mc[4]; /* unused */
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_info;
+	u32			db_depth_size_idx;
+	u32			db_depth_view;
+	u32			db_depth_size;
+	u32			db_offset;
+	struct radeon_bo	*db_bo;
+	u64			db_bo_mc;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	struct radeon_bo	*htile_bo;
+	u64			htile_offset;
+	u32			htile_surface;
+};
+
+#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
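+
+/* Each FMT_*_BIT entry expands to { blockwidth, blockheight, blocksize,
+ * valid_color, min_family } of struct gpu_formats below; entries that
+ * omit min_family default to 0 and are treated as valid on all
+ * families. */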
+
+struct gpu_formats {
+	unsigned blockwidth;
+	unsigned blockheight;
+	unsigned blocksize;
+	unsigned valid_color;
+	enum radeon_family min_family;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+	/* 8 bit */
+	FMT_8_BIT(V_038004_COLOR_8, 1),
+	FMT_8_BIT(V_038004_COLOR_4_4, 1),
+	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+	FMT_8_BIT(V_038004_FMT_1, 0),
+
+	/* 16-bit */
+	FMT_16_BIT(V_038004_COLOR_16, 1),
+	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+	FMT_16_BIT(V_038004_COLOR_8_8, 1),
+	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+	/* 24-bit */
+	FMT_24_BIT(V_038004_FMT_8_8_8),
+
+	/* 32-bit */
+	FMT_32_BIT(V_038004_COLOR_32, 1),
+	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+	/* 48-bit */
+	FMT_48_BIT(V_038004_FMT_16_16_16),
+	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+	/* 64-bit */
+	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+	FMT_96_BIT(V_038004_FMT_32_32_32),
+	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+	/* 128-bit */
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+	/* block compressed formats */
+	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
+	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+
+	/* The other Evergreen formats */
+	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
+};
+
+bool r600_fmt_is_valid_color(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (color_formats_table[format].valid_color)
+		return true;
+
+	return false;
+}
+
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (family < color_formats_table[format].min_family)
+		return false;
+
+	if (color_formats_table[format].blockwidth > 0)
+		return true;
+
+	return false;
+}
+
+int r600_fmt_get_blocksize(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	return color_formats_table[format].blocksize;
+}
+
+int r600_fmt_get_nblocksx(u32 format, u32 w)
+{
+	unsigned bw;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bw = color_formats_table[format].blockwidth;
+	if (bw == 0)
+		return 0;
+
+	return (w + bw - 1) / bw;
+}
+
+int r600_fmt_get_nblocksy(u32 format, u32 h)
+{
+	unsigned bh;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bh = color_formats_table[format].blockheight;
+	if (bh == 0)
+		return 0;
+
+	return (h + bh - 1) / bh;
+}
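+
+/* Worked example (our own annotation): BC1 is listed above as 4x4 pixel
+ * blocks of 8 bytes, so a 250x122 texture needs
+ * r600_fmt_get_nblocksx() = (250 + 3) / 4 = 63 blocks across and
+ * r600_fmt_get_nblocksy() = (122 + 3) / 4 = 31 blocks down, i.e.
+ * 63 * 31 * r600_fmt_get_blocksize() = 63 * 31 * 8 = 15624 bytes. */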
+
+struct array_mode_checker {
+	int array_mode;
+	u32 group_size;
+	u32 nbanks;
+	u32 npipes;
+	u32 nsamples;
+	u32 blocksize;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+						u32 *pitch_align,
+						u32 *height_align,
+						u32 *depth_align,
+						u64 *base_align)
+{
+	u32 tile_width = 8;
+	u32 tile_height = 8;
+	u32 macro_tile_width = values->nbanks;
+	u32 macro_tile_height = values->npipes;
+	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
+	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+	switch (values->array_mode) {
+	case ARRAY_LINEAR_GENERAL:
+		/* technically tile_width/_height for pitch/height */
+		*pitch_align = 1; /* tile_width */
+		*height_align = 1; /* tile_height */
+		*depth_align = 1;
+		*base_align = 1;
+		break;
+	case ARRAY_LINEAR_ALIGNED:
+		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
+		*height_align = 1;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_1D_TILED_THIN1:
+		*pitch_align = max((u32)tile_width,
+				   (u32)(values->group_size /
+					 (tile_height * values->blocksize * values->nsamples)));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_2D_TILED_THIN1:
+		*pitch_align = max((u32)macro_tile_width * tile_width,
+				(u32)((values->group_size * values->nbanks) /
+				(values->blocksize * values->nsamples * tile_width)));
+		*height_align = macro_tile_height * tile_height;
+		*depth_align = 1;
+		*base_align = max(macro_tile_bytes,
+				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
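+
+/* Worked example (illustrative numbers, not a specific asic): with
+ * group_size = 256, nbanks = 4, npipes = 2, blocksize = 4, nsamples = 1,
+ * ARRAY_2D_TILED_THIN1 gives tile_bytes = 8 * 8 * 4 = 256,
+ * macro_tile_bytes = 4 * 2 * 256 = 2048,
+ * pitch_align = max(4 * 8, (256 * 4) / (4 * 1 * 8)) = 32 pixels,
+ * height_align = 2 * 8 = 16, and
+ * base_align = max(2048, 32 * 4 * 16) = 2048 bytes. */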
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+	int i;
+
+	/* assume DX9 mode */
+	track->sq_config = DX9_CONSTS;
+	for (i = 0; i < 8; i++) {
+		track->cb_color_base_last[i] = 0;
+		track->cb_color_size[i] = 0;
+		track->cb_color_size_idx[i] = 0;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
+		track->cb_color_frag_bo[i] = NULL;
+		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+		track->cb_color_tile_bo[i] = NULL;
+		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+		track->cb_color_mask[i] = 0xFFFFFFFF;
+	}
+	track->is_resolve = false;
+	track->nsamples = 16;
+	track->log_nsamples = 4;
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+	track->db_bo = NULL;
+	track->db_bo_mc = 0xFFFFFFFF;
+	/* assume the biggest format and that htile is enabled */
+	track->db_depth_info = 7 | (1 << 25);
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_size_idx = 0;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
+
+static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+	struct r600_cs_track *track = p->track;
+	u32 slice_tile_max, size, tmp;
+	u32 height, height_align, pitch, pitch_align, depth_align;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	volatile u32 *ib = p->ib.ptr;
+	unsigned array_mode;
+	u32 format;
+	/* When resolve is used, the second colorbuffer always has 1 sample. */
+	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
+
+	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
+	format = G_0280A0_FORMAT(track->cb_color_info[i]);
+	if (!r600_fmt_is_valid_color(format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+			 __func__, __LINE__, format,
+			i, track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	/* pitch in pixels */
+	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
+	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+	slice_tile_max *= 64;
+	height = slice_tile_max / pitch;
+	if (height > 8192)
+		height = 8192;
+	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+	array_check.array_mode = array_mode;
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = nsamples;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			 track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	switch (array_mode) {
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+		break;
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+		/* avoid breaking userspace */
+		if (height > 7)
+			height &= ~0x7;
+		break;
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		break;
+	default:
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			track->cb_color_info[i]);
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(height, height_align)) {
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
+		return -EINVAL;
+	}
+
+	/* check offset */
+	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+	      r600_fmt_get_blocksize(format) * nsamples;
+	switch (array_mode) {
+	default:
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		tmp += track->cb_color_view[i] & 0xFF;
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
+		break;
+	}
+	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+			/* The initial DDX does bad things with the CB size occasionally:
+			 * it rounds up height too far for slice tile max but the BO is
+			 * smaller. r600c,g also seem to flush at bad times in some apps,
+			 * resulting in bogus values here. So for linear just allow
+			 * anything to avoid breaking broken userspace.
+			 */
+		} else {
+			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+				 __func__, i, array_mode,
+				 track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]),
+				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
+				 r600_fmt_get_nblocksy(format, height),
+				 r600_fmt_get_blocksize(format));
+			return -EINVAL;
+		}
+	}
+	/* limit max tile */
+	tmp = (height * pitch) >> 6;
+	if (tmp < slice_tile_max)
+		slice_tile_max = tmp;
+	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
+		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+	ib[track->cb_color_size_idx[i]] = tmp;
+
+	/* FMASK/CMASK */
+	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+	case V_0280A0_TILE_DISABLE:
+		break;
+	case V_0280A0_FRAG_ENABLE:
+		if (track->nsamples > 1) {
+			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* each pixel of an 8x8 tile takes nsamples * log_nsamples
+			 * bits; 64 pixels / 8 bits per byte gives the factor of 8. */
+			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+			if (bytes + track->cb_color_frag_offset[i] >
+			    radeon_bo_size(track->cb_color_frag_bo[i])) {
+				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+					 __func__, tile_max, bytes,
+					 track->cb_color_frag_offset[i],
+					 radeon_bo_size(track->cb_color_frag_bo[i]));
+				return -EINVAL;
+			}
+		}
+		/* fall through */
+	case V_0280A0_CLEAR_ENABLE:
+	{
+		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels, one 8x8 tile has 4 bits:
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+		uint32_t bytes = (block_max + 1) * 128;
+
+		if (bytes + track->cb_color_tile_offset[i] >
+		    radeon_bo_size(track->cb_color_tile_bo[i])) {
+			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+				 __func__, block_max, bytes,
+				 track->cb_color_tile_offset[i],
+				 radeon_bo_size(track->cb_color_tile_bo[i]));
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+	u32 height_align, pitch_align, depth_align;
+	u32 pitch = 8192;
+	u32 height = 8192;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	int array_mode;
+	volatile u32 *ib = p->ib.ptr;
+
+
+	if (track->db_bo == NULL) {
+		dev_warn(p->dev, "z/stencil with no depth buffer\n");
+		return -EINVAL;
+	}
+	switch (G_028010_FORMAT(track->db_depth_info)) {
+	case V_028010_DEPTH_16:
+		bpe = 2;
+		break;
+	case V_028010_DEPTH_X8_24:
+	case V_028010_DEPTH_8_24:
+	case V_028010_DEPTH_X8_24_FLOAT:
+	case V_028010_DEPTH_8_24_FLOAT:
+	case V_028010_DEPTH_32_FLOAT:
+		bpe = 4;
+		break;
+	case V_028010_DEPTH_X24_8_32_FLOAT:
+		bpe = 8;
+		break;
+	default:
+		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+		return -EINVAL;
+	}
+	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+		if (!track->db_depth_size_idx) {
+			dev_warn(p->dev, "z/stencil buffer size not set\n");
+			return -EINVAL;
+		}
+		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+		tmp = (tmp / bpe) >> 6;
+		if (!tmp) {
+			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+					track->db_depth_size, bpe, track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+	} else {
+		size = radeon_bo_size(track->db_bo);
+		/* pitch in pixels */
+		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		slice_tile_max *= 64;
+		height = slice_tile_max / pitch;
+		if (height > 8192)
+			height = 8192;
+		base_offset = track->db_bo_mc + track->db_offset;
+		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+		array_check.array_mode = array_mode;
+		array_check.group_size = track->group_size;
+		array_check.nbanks = track->nbanks;
+		array_check.npipes = track->npipes;
+		array_check.nsamples = track->nsamples;
+		array_check.blocksize = bpe;
+		if (r600_get_array_mode_alignment(&array_check,
+					&pitch_align, &height_align, &depth_align, &base_align)) {
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+		switch (array_mode) {
+		case V_028010_ARRAY_1D_TILED_THIN1:
+			/* don't break userspace */
+			height &= ~0x7;
+			break;
+		case V_028010_ARRAY_2D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+
+		if (!IS_ALIGNED(pitch, pitch_align)) {
+			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, pitch, pitch_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(height, height_align)) {
+			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, height, height_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(base_offset, base_align)) {
+			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+					base_offset, base_align, array_mode);
+			return -EINVAL;
+		}
+
+		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
+		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					array_mode,
+					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+	}
+
+	/* hyperz */
+	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+		unsigned long size;
+		unsigned nbx, nby;
+
+		if (track->htile_bo == NULL) {
+			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_info);
+			return -EINVAL;
+		}
+		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_size);
+			return -EINVAL;
+		}
+
+		nbx = pitch;
+		nby = height;
+		if (G_028D24_LINEAR(track->htile_surface)) {
+			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+			nbx = round_up(nbx, 16 * 8);
+			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+			nby = round_up(nby, track->npipes * 8);
+		} else {
+			/* always assume 8x8 htile */
+			/* align is htile align * 8, htile align vary according to
+			 * number of pipe and tile width and nby
+			 */
+			switch (track->npipes) {
+			case 8:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 64 * 8);
+				break;
+			case 4:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 32 * 8);
+				break;
+			case 2:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 32 * 8);
+				break;
+			case 1:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 16 * 8);
+				break;
+			default:
+				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					 __func__, __LINE__, track->npipes);
+				return -EINVAL;
+			}
+		}
+		/* compute number of htile */
+		nbx = nbx >> 3;
+		nby = nby >> 3;
+		/* size must be aligned on npipes * 2K boundary */
+		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+		size += track->htile_offset;
+
+		if (size > radeon_bo_size(track->htile_bo)) {
+			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
+				 size, nbx, nby);
+			return -EINVAL;
+		}
+	}
+
+	track->db_dirty = false;
+	return 0;
+}
+
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 tmp;
+	int r, i;
+
+	/* on legacy kernel we don't perform advanced check */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* check streamout */
+	if (track->streamout_dirty && track->vgt_strmout_en) {
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_buffer_en & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+						(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+							  i, offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target; we don't check
+	 * shader_mask because it seems mesa isn't always setting it :(
+	 */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+
+		/* We must check both colorbuffers for RESOLVE. */
+		if (track->is_resolve) {
+			tmp |= 0xff;
+		}
+
+		for (i = 0; i < 8; i++) {
+			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_0280A0_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* perform rewrite of CB_COLOR[0-7]_SIZE */
+				r = r600_cs_track_validate_cb(p, i);
+				if (r)
+					return r;
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	/* Check depth buffer */
+	if (track->db_dirty &&
+	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+	     G_028800_Z_ENABLE(track->db_depth_control))) {
+		r = r600_cs_track_validate_db(p);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+					      AVIVO_D2MODE_VLINE_START_END};
+	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+					   AVIVO_D2MODE_VLINE_STATUS};
+
+	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+/**
+ * r600_cs_common_vline_parse() - common vline parser
+ * @p:		parser structure holding parsing context.
+ * @vline_start_end:    table of vline_start_end registers
+ * @vline_status:       table of vline_status registers
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case. This function is common for all ASICs that
+ * are R600 and newer. The parsing algorithm is the same; only the
+ * registers used differ.
+ *
+ * The caller is the ASIC-specific function which passes the parser
+ * context and the ASIC-specific register tables.
+ */
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+			       uint32_t *vline_start_end,
+			       uint32_t *vline_status)
+{
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a WAIT_REG_MEM */
+	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		return -EINVAL;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+		return -EINVAL;
+	}
+	/* bit 8 is me (0) or pfp (1) */
+	if (wait_reg_mem_info & 0x100) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+		return -EINVAL;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		return -EINVAL;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = R600_CP_PACKET0_GET_REG(header);
+
+	crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+	if (!crtc) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -ENOENT;
+	}
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else if (reg == vline_start_end[0]) {
+		header &= ~R600_CP_PACKET0_REG_MASK;
+		header |= vline_start_end[crtc_id] >> 2;
+		ib[h_idx] = header;
+		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+	} else {
+		DRM_ERROR("unknown crtc reloc\n");
+		return -EINVAL;
+	}
+	return 0;
+}
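+
+/*
+ * For reference, the dword layout patched above (a sketch, assuming the
+ * usual userspace sequence): ib[h_idx] is the PACKET0 header for
+ * VLINE_START_END, ib[h_idx + 2] the WAIT_REG_MEM header, ib[h_idx + 4]
+ * the polled register (as a dword address) and ib[h_idx + 2 + 7 + 1]
+ * the crtc_id carried by the trailing NOP reloc.
+ */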
+
+static int r600_packet0_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt,
+				unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+		r = r600_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = r600_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe,
+ * this function will test it against a list of registers needing
+ * special handling.
+ */
+static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+	struct radeon_bo_list *reloc;
+	u32 m, i, tmp, *ib;
+	int r;
+
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return 0;
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we need to understand better how it works before we can
+	 * perform security checks on it (Jerome)
+	 */
+	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+	case R_008C44_SQ_ESGS_RING_SIZE:
+	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+	case R_008C54_SQ_ESTMP_RING_SIZE:
+	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+	case R_008C74_SQ_FBUF_RING_SIZE:
+	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+	case R_008C5C_SQ_GSTMP_RING_SIZE:
+	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+	case R_008C4C_SQ_GSVS_RING_SIZE:
+	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+	case R_008C6C_SQ_PSTMP_RING_SIZE:
+	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+	case R_008C7C_SQ_REDUC_RING_SIZE:
+	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+	case R_008C64_SQ_VSTMP_RING_SIZE:
+	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+		/* get value to populate the IB don't remove */
+		/* tmp = radeon_get_ib_value(p, idx);
+		 * ib[idx] = 0;
+		 */
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SQ_CONFIG:
+		track->sq_config = radeon_get_ib_value(p, idx);
+		break;
+	case R_028800_DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028010_DB_DEPTH_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		    radeon_cs_packet_next_is_pkt3_nop(p)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					 "0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+			ib[idx] &= C_028010_ARRAY_MODE;
+			track->db_depth_info &= C_028010_ARRAY_MODE;
+			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+			} else {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+		}
+		track->db_dirty = true;
+		break;
+	case R_028004_DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028000_DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_depth_size_idx = idx;
+		track->db_dirty = true;
+		break;
+	case R_028AB0_VGT_STRMOUT_EN:
+		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case R_028B20_VGT_STRMOUT_BUFFER_EN:
+		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case R_028238_CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_02823C_CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		break;
+	case R_028C04_PA_SC_AA_CONFIG:
+		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+		track->log_nsamples = tmp;
+		track->nsamples = 1 << tmp;
+		track->cb_dirty = true;
+		break;
+	case R_028808_CB_COLOR_CONTROL:
+		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+		track->cb_dirty = true;
+		break;
+	case R_0280A0_CB_COLOR0_INFO:
+	case R_0280A4_CB_COLOR1_INFO:
+	case R_0280A8_CB_COLOR2_INFO:
+	case R_0280AC_CB_COLOR3_INFO:
+	case R_0280B0_CB_COLOR4_INFO:
+	case R_0280B4_CB_COLOR5_INFO:
+	case R_0280B8_CB_COLOR6_INFO:
+	case R_0280BC_CB_COLOR7_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		     radeon_cs_packet_next_is_pkt3_nop(p)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+			} else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		}
+		track->cb_dirty = true;
+		break;
+	case R_028080_CB_COLOR0_VIEW:
+	case R_028084_CB_COLOR1_VIEW:
+	case R_028088_CB_COLOR2_VIEW:
+	case R_02808C_CB_COLOR3_VIEW:
+	case R_028090_CB_COLOR4_VIEW:
+	case R_028094_CB_COLOR5_VIEW:
+	case R_028098_CB_COLOR6_VIEW:
+	case R_02809C_CB_COLOR7_VIEW:
+		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_028060_CB_COLOR0_SIZE:
+	case R_028064_CB_COLOR1_SIZE:
+	case R_028068_CB_COLOR2_SIZE:
+	case R_02806C_CB_COLOR3_SIZE:
+	case R_028070_CB_COLOR4_SIZE:
+	case R_028074_CB_COLOR5_SIZE:
+	case R_028078_CB_COLOR6_SIZE:
+	case R_02807C_CB_COLOR7_SIZE:
+		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_size_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+		/* These registers were added late; there is userspace
+		 * which does provide relocations for them but sets a
+		 * 0 offset. In order to avoid breaking old userspace
+		 * we detect this and set the address to point to the
+		 * last CB_COLOR0_BASE. Note that if userspace doesn't
+		 * set CB_COLOR0_BASE before these registers we will
+		 * report an error. Old userspace always set
+		 * CB_COLOR0_BASE before any of these.
+		 */
+	case R_0280E0_CB_COLOR0_FRAG:
+	case R_0280E4_CB_COLOR1_FRAG:
+	case R_0280E8_CB_COLOR2_FRAG:
+	case R_0280EC_CB_COLOR3_FRAG:
+	case R_0280F0_CB_COLOR4_FRAG:
+	case R_0280F4_CB_COLOR5_FRAG:
+	case R_0280F8_CB_COLOR6_FRAG:
+	case R_0280FC_CB_COLOR7_FRAG:
+		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = reloc->robj;
+			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_0280C0_CB_COLOR0_TILE:
+	case R_0280C4_CB_COLOR1_TILE:
+	case R_0280C8_CB_COLOR2_TILE:
+	case R_0280CC_CB_COLOR3_TILE:
+	case R_0280D0_CB_COLOR4_TILE:
+	case R_0280D4_CB_COLOR5_TILE:
+	case R_0280D8_CB_COLOR6_TILE:
+	case R_0280DC_CB_COLOR7_TILE:
+		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = reloc->robj;
+			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_028100_CB_COLOR0_MASK:
+	case R_028104_CB_COLOR1_MASK:
+	case R_028108_CB_COLOR2_MASK:
+	case R_02810C_CB_COLOR3_MASK:
+	case R_028110_CB_COLOR4_MASK:
+	case R_028114_CB_COLOR5_MASK:
+	case R_028118_CB_COLOR6_MASK:
+	case R_02811C_CB_COLOR7_MASK:
+		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 4;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_base_last[tmp] = ib[idx];
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
+		track->cb_dirty = true;
+		break;
+	case DB_DEPTH_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->db_bo = reloc->robj;
+		track->db_bo_mc = reloc->gpu_offset;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
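+/*
+ * Illustrative example: r600_mip_minify(13, 2) is max(1, 13 >> 2) = 3,
+ * rounded up to the next power of two, i.e. 4. Level 0 is returned
+ * unrounded.
+ */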
+unsigned r600_mip_minify(unsigned size, unsigned level)
+{
+	unsigned val;
+
+	val = max(1U, size >> level);
+	if (level > 0)
+		val = roundup_pow_of_two(val);
+	return val;
+}
+
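+/*
+ * Worked example for r600_texture_size() below (hypothetical inputs):
+ * a 256x256 uncompressed 32-bit 2D texture with blevel = 0, llevel = 2,
+ * one face, one sample and all alignments 1 has level sizes 262144,
+ * 65536 and 16384 bytes, so *l0_size is 262144 and *mipmap_size is
+ * 81920, since level 0 is subtracted from the total when blevel is 0.
+ */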
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
+			      unsigned block_align, unsigned height_align, unsigned base_align,
+			      unsigned *l0_size, unsigned *mipmap_size)
+{
+	unsigned offset, i, level;
+	unsigned width, height, depth, size;
+	unsigned blocksize;
+	unsigned nbx, nby;
+	unsigned nlevels = llevel - blevel + 1;
+
+	*l0_size = -1;
+	blocksize = r600_fmt_get_blocksize(format);
+
+	w0 = r600_mip_minify(w0, 0);
+	h0 = r600_mip_minify(h0, 0);
+	d0 = r600_mip_minify(d0, 0);
+	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+		width = r600_mip_minify(w0, i);
+		nbx = r600_fmt_get_nblocksx(format, width);
+
+		nbx = round_up(nbx, block_align);
+
+		height = r600_mip_minify(h0, i);
+		nby = r600_fmt_get_nblocksy(format, height);
+		nby = round_up(nby, height_align);
+
+		depth = r600_mip_minify(d0, i);
+
+		size = nbx * nby * blocksize * nsamples;
+		if (nfaces)
+			size *= nfaces;
+		else
+			size *= depth;
+
+		if (i == 0)
+			*l0_size = size;
+
+		if (i == 0 || i == 1)
+			offset = round_up(offset, base_align);
+
+		offset += size;
+	}
+	*mipmap_size = offset;
+	if (llevel == 0)
+		*mipmap_size = *l0_size;
+	if (!blevel)
+		*mipmap_size -= *l0_size;
+}
+
+/**
+ * r600_check_texture_resource() - check texture resource validity
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+					      struct radeon_bo *texture,
+					      struct radeon_bo *mipmap,
+					      u64 base_offset,
+					      u64 mip_offset,
+					      u32 tiling_flags)
+{
+	struct r600_cs_track *track = p->track;
+	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
+	u32 height_align, pitch, pitch_align, depth_align;
+	u32 barray, larray;
+	u64 base_align;
+	struct array_mode_checker array_check;
+	u32 format;
+	bool is_array;
+
+	/* on legacy kernels we don't perform advanced checks */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* convert to bytes */
+	base_offset <<= 8;
+	mip_offset <<= 8;
+
+	word0 = radeon_get_ib_value(p, idx + 0);
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	}
+	word1 = radeon_get_ib_value(p, idx + 1);
+	word2 = radeon_get_ib_value(p, idx + 2) << 8;
+	word3 = radeon_get_ib_value(p, idx + 3) << 8;
+	word4 = radeon_get_ib_value(p, idx + 4);
+	word5 = radeon_get_ib_value(p, idx + 5);
+	dim = G_038000_DIM(word0);
+	w0 = G_038000_TEX_WIDTH(word0) + 1;
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
+	h0 = G_038004_TEX_HEIGHT(word1) + 1;
+	d0 = G_038004_TEX_DEPTH(word1);
+	format = G_038004_DATA_FORMAT(word1);
+	blevel = G_038010_BASE_LEVEL(word4);
+	llevel = G_038014_LAST_LEVEL(word5);
+	/* pitch in texels */
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	nfaces = 1;
+	is_array = false;
+	switch (dim) {
+	case V_038000_SQ_TEX_DIM_1D:
+	case V_038000_SQ_TEX_DIM_2D:
+	case V_038000_SQ_TEX_DIM_3D:
+		break;
+	case V_038000_SQ_TEX_DIM_CUBEMAP:
+		if (p->family >= CHIP_RV770)
+			nfaces = 8;
+		else
+			nfaces = 6;
+		break;
+	case V_038000_SQ_TEX_DIM_1D_ARRAY:
+	case V_038000_SQ_TEX_DIM_2D_ARRAY:
+		is_array = true;
+		break;
+	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		is_array = true;
+		/* fall through */
+	case V_038000_SQ_TEX_DIM_2D_MSAA:
+		array_check.nsamples = 1 << llevel;
+		llevel = 0;
+		break;
+	default:
+		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+		return -EINVAL;
+	}
+	if (!r600_fmt_is_valid_texture(format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, format);
+		return -EINVAL;
+	}
+
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+			 __func__, __LINE__, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	/* XXX check height as well... */
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(mip_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	if (blevel > llevel) {
+		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
+			 blevel, llevel);
+	}
+	if (is_array) {
+		barray = G_038014_BASE_ARRAY(word5);
+		larray = G_038014_LAST_ARRAY(word5);
+
+		nfaces = larray - barray + 1;
+	}
+	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
+			  pitch_align, height_align, base_align,
+			  &l0_size, &mipmap_size);
+	/* the ib value gives us the offset into the texture bo */
+	if ((l0_size + word2) > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+			 w0, h0, pitch_align, height_align,
+			 array_check.array_mode, format, word2,
+			 l0_size, radeon_bo_size(texture));
+		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+		return -EINVAL;
+	}
+	/* the ib value gives us the offset into the mipmap bo */
+	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
+		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
+	}
+	return 0;
+}
+
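+/*
+ * Example of the safe-register bitmap lookup (illustrative): for
+ * reg = 0x028800 (DB_DEPTH_CONTROL), i = 0x28800 >> 7 = 0x510 and
+ * m = 1 << ((0x28800 >> 2) & 31) = 1 << 0, so the register is safe
+ * only if bit 0 of r600_reg_safe_bm[0x510] is clear.
+ */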
+static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	u32 m, i;
+
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return true;
+	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	return false;
+}
+
+static int r600_packet3_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	struct radeon_bo_list *reloc;
+	struct r600_cs_track *track;
+	volatile u32 *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct r600_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 (idx_value & 0xfffffff0) +
+			 ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
+
+	case PACKET3_START_3D_CMDBUF:
+		if (p->family >= CHIP_RV770 || pkt->count) {
+			DRM_ERROR("bad START_3D\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD_BE:
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->gpu_offset +
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else if (idx_value & 0x100) {
+			DRM_ERROR("cannot use PFP on REG wait\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			DRM_ERROR("CP DMA SAS not supported\n");
+			return -EINVAL;
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad CP DMA SRC\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx) +
+				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+			offset = reloc->gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx] = offset;
+			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			DRM_ERROR("CP DMA DAS not supported\n");
+			return -EINVAL;
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad CP DMA DST\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx+2) +
+				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+			offset = reloc->gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx+2] = offset;
+			ib[idx+3] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	}
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->gpu_offset +
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->gpu_offset +
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 7) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < (pkt->count / 7); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 size, offset, base_offset, mip_offset;
+
+			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					if (reloc->tiling_flags & RADEON_TILING_MACRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+					else if (reloc->tiling_flags & RADEON_TILING_MICRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+				}
+				texture = reloc->robj;
+				/* tex mip base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+				mipmap = reloc->robj;
+				r = r600_check_texture_resource(p,  idx+(i*7)+1,
+								texture, mipmap,
+								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+								reloc->tiling_flags);
+				if (r)
+					return r;
+				ib[idx+1+(i*7)+2] += base_offset;
+				ib[idx+1+(i*7)+3] += mip_offset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+						 size + offset, radeon_bo_size(reloc->robj));
+					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->gpu_offset + offset;
+				ib[idx+1+(i*7)+0] = offset64;
+				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_ALU_CONST:
+		if (track->sq_config & DX9_CONSTS) {
+			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+			end_reg = 4 * pkt->count + start_reg - 4;
+			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+				DRM_ERROR("bad SET_ALU_CONST\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BASE_UPDATE:
+		/* RS780 and RS880 also need this */
+		if (p->family < CHIP_RS780) {
+			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+			return -EINVAL;
+		}
+		if (pkt->count != 1) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+			return -EINVAL;
+		}
+		if (idx_value > 3) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+			return -EINVAL;
+		}
+		{
+			u64 offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+				return -EINVAL;
+			}
+
+			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+				return -EINVAL;
+			}
+
+			offset = radeon_get_ib_value(p, idx+1) << 8;
+			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+					  offset, track->vgt_strmout_bo_offset[idx_value]);
+				return -EINVAL;
+			}
+
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_SURFACE_BASE_UPDATE:
+		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+				  offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+1))
+				return -EINVAL;
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+3))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r600_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r600_cs_track *track;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize tracker, we are in kms */
+		track = kzalloc(sizeof(*track), GFP_KERNEL);
+		if (track == NULL)
+			return -ENOMEM;
+		r600_cs_track_init(track);
+		if (p->rdev->family < CHIP_RV770) {
+			track->npipes = p->rdev->config.r600.tiling_npipes;
+			track->nbanks = p->rdev->config.r600.tiling_nbanks;
+			track->group_size = p->rdev->config.r600.tiling_group_size;
+		} else if (p->rdev->family <= CHIP_RV740) {
+			track->npipes = p->rdev->config.rv770.tiling_npipes;
+			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+			track->group_size = p->rdev->config.rv770.tiling_group_size;
+		}
+		p->track = track;
+	}
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = r600_cs_parse_packet0(p, &pkt);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r600_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			kfree(p->track);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	kfree(p->track);
+	p->track = NULL;
+	return 0;
+}
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:		reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_bo_list **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	unsigned idx;
+
+	*cs_reloc = NULL;
+	if (p->chunk_relocs == NULL) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	relocs_chunk = p->chunk_relocs;
+	idx = p->dma_reloc_idx;
+	if (idx >= p->nrelocs) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, p->nrelocs);
+		return -EINVAL;
+	}
+	*cs_reloc = &p->relocs[idx];
+	p->dma_reloc_idx++;
+	return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
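+
+/*
+ * Illustrative decode: header 0x30000040 gives GET_DMA_CMD() = 0x3,
+ * GET_DMA_COUNT() = 0x40 dwords and GET_DMA_T() = 0 (bit 23, the
+ * tiled flag).
+ */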
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+	struct radeon_bo_list *src_reloc, *dst_reloc;
+	u32 header, cmd, count, tiled;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				p->idx += count + 5;
+			} else {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				/* detile bit */
+				if (idx_value & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+5);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+5);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+				}
+				p->idx += 7;
+			} else {
+				if (p->family >= CHIP_RV770) {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+					p->idx += 5;
+				} else {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+					ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
+					p->idx += 4;
+				}
+			}
+			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			if (p->family < CHIP_RV770) {
+				DRM_ERROR("Constant Fill is 7xx only !\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 0000000..fb65e6f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
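+
+/*
+ * For illustration, the 4 dword write packet used by the ring test
+ * below is built with the DMA_PACKET() macro as:
+ *
+ *	DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)	header, one data dword
+ *	lower_32_bits(gpu_addr)			dst address bits 31:0
+ *	upper_32_bits(gpu_addr) & 0xff		dst address bits 39:32
+ *	0xDEADBEEF				the data dword itself
+ */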
+
+/**
+ * r600_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (r6xx+).
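+ *
+ * The register value is a byte offset; the 0x3fffc mask and the >> 2
+ * below convert it to a ring index in dwords.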
+ */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(DMA_RB_RPTR);
+
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (r6xx+).
+ */
+void r600_dma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = order_base_2(ring->ring_size / 4);
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+	ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+	WREG32(DMA_IB_CNTL, ib_cntl);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & RADEON_RESET_DMA)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	unsigned index;
+	u32 tmp;
+	u64 gpu_addr;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
+
+	tmp = 0xCAFEDEAD;
+	rdev->wb.wb[index/4] = cpu_to_le32(tmp);
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, lower_32_bits(gpu_addr));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * seq number, followed by a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, lower_32_bits(fence->seq));
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or
+ * signal other rings (r6xx-SI).
+ */
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+	return true;
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp = 0;
+	u64 gpu_addr;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		index = R600_WB_DMA_RING_TEST_OFFSET;
+	else
+		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+	gpu_addr = rdev->wb.gpu_addr + index;
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		return -ETIMEDOUT;
+	}
+	r = 0;
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
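+		/* next_rptr = ring position once this 4 DW write packet, the
+		 * NOP padding and the 3 DW IB packet below have been fetched
+		 */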
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs: with wptr at dword 8n+5, the 3 DW
+	 * INDIRECT_BUFFER packet fills dwords 8n+5..8n+7 and ends exactly
+	 * on the boundary.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+				   uint64_t src_offset, uint64_t dst_offset,
+				   unsigned num_gpu_pages,
+				   struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
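+	/* a single DMA_PACKET_COPY moves at most 0xFFFE dwords, so split the transfer */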
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFE)
+			cur_size_in_dw = 0xFFFE;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
new file mode 100644
index 0000000..5e044c9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -0,0 +1,1371 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "r600_dpm.h"
+#include "atom.h"
+
+const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_UTC_DFLT_00,
+	R600_UTC_DFLT_01,
+	R600_UTC_DFLT_02,
+	R600_UTC_DFLT_03,
+	R600_UTC_DFLT_04,
+	R600_UTC_DFLT_05,
+	R600_UTC_DFLT_06,
+	R600_UTC_DFLT_07,
+	R600_UTC_DFLT_08,
+	R600_UTC_DFLT_09,
+	R600_UTC_DFLT_10,
+	R600_UTC_DFLT_11,
+	R600_UTC_DFLT_12,
+	R600_UTC_DFLT_13,
+	R600_UTC_DFLT_14,
+};
+
+const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_DTC_DFLT_00,
+	R600_DTC_DFLT_01,
+	R600_DTC_DFLT_02,
+	R600_DTC_DFLT_03,
+	R600_DTC_DFLT_04,
+	R600_DTC_DFLT_05,
+	R600_DTC_DFLT_06,
+	R600_DTC_DFLT_07,
+	R600_DTC_DFLT_08,
+	R600_DTC_DFLT_09,
+	R600_DTC_DFLT_10,
+	R600_DTC_DFLT_11,
+	R600_DTC_DFLT_12,
+	R600_DTC_DFLT_13,
+	R600_DTC_DFLT_14,
+};
+
+void r600_dpm_print_class_info(u32 class, u32 class2)
+{
+	const char *s;
+
+	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+	default:
+		s = "none";
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		s = "battery";
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		s = "balanced";
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		s = "performance";
+		break;
+	}
+	printk("\tui class: %s\n", s);
+
+	printk("\tinternal class:");
+	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
+	    (class2 == 0))
+		pr_cont(" none");
+	else {
+		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+			pr_cont(" boot");
+		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+			pr_cont(" thermal");
+		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+			pr_cont(" limited_pwr");
+		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
+			pr_cont(" rest");
+		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
+			pr_cont(" forced");
+		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			pr_cont(" 3d_perf");
+		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
+			pr_cont(" ovrdrv");
+		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+			pr_cont(" uvd");
+		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
+			pr_cont(" 3d_low");
+		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+			pr_cont(" acpi");
+		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+			pr_cont(" uvd_hd2");
+		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+			pr_cont(" uvd_hd");
+		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+			pr_cont(" uvd_sd");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+			pr_cont(" limited_pwr2");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+			pr_cont(" ulv");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+			pr_cont(" uvd_mvc");
+	}
+	pr_cont("\n");
+}
+
+void r600_dpm_print_cap_info(u32 caps)
+{
+	printk("\tcaps:");
+	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		pr_cont(" single_disp");
+	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
+		pr_cont(" video");
+	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
+		pr_cont(" no_dc");
+	pr_cont("\n");
+}
+
+void r600_dpm_print_ps_status(struct radeon_device *rdev,
+			      struct radeon_ps *rps)
+{
+	printk("\tstatus:");
+	if (rps == rdev->pm.dpm.current_ps)
+		pr_cont(" c");
+	if (rps == rdev->pm.dpm.requested_ps)
+		pr_cont(" r");
+	if (rps == rdev->pm.dpm.boot_ps)
+		pr_cont(" b");
+	pr_cont("\n");
+}
+
+u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 vblank_in_pixels;
+	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+				vblank_in_pixels =
+					radeon_crtc->hw_mode.crtc_htotal *
+					(radeon_crtc->hw_mode.crtc_vblank_end -
+					 radeon_crtc->hw_mode.crtc_vdisplay +
+					 (radeon_crtc->v_border * 2));
+
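+				/* hw_mode.clock is in kHz, so this yields microseconds */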
+				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
+				break;
+			}
+		}
+	}
+
+	return vblank_time_us;
+}
+
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 vrefresh = 0;
+
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
+				break;
+			}
+		}
+	}
+	return vrefresh;
+}
+
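+/*
+ * Worked example (a sketch, assuming the usual calling convention of
+ * r_c = reference clock in 10 kHz units and p_b = 16): for
+ * i = R600_ASI_DFLT (10000) and a 27 MHz reference (r_c = 2700),
+ * i_c = 270000 = 0x41EB0 and i_c >> 16 = 4, so b_c = 3, giving
+ * u = (3 + 1) / 2 = 2 and p = 270000 >> 4 = 0x41EB, matching the
+ * R600_BSU_DFLT and R600_BSP_DFLT values in r600_dpm.h.
+ */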
+void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			    u32 *p, u32 *u)
+{
+	u32 b_c = 0;
+	u32 i_c;
+	u32 tmp;
+
+	i_c = (i * r_c) / 100;
+	tmp = i_c >> p_b;
+
+	while (tmp) {
+		b_c++;
+		tmp >>= 1;
+	}
+
+	*u = (b_c + 1) / 2;
+	*p = i_c / (1 << (2 * (*u)));
+}
+
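+/*
+ * Worked example with hypothetical inputs t = 1000, h = 500,
+ * fh = 60000, fl = 30000: k = 200, t1 = 100000,
+ * a = (1000 * 150000) / 11000 = 13636, rounded to 1364;
+ * then ah = 136, al = 1228, so *th = 864 and *tl = 2228.
+ */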
+int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+	u32 k, a, ah, al;
+	u32 t1;
+
+	if ((fl == 0) || (fh == 0) || (fl > fh))
+		return -EINVAL;
+
+	k = (100 * fh) / fl;
+	t1 = (t * (k - 100));
+	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+	a = (a + 5) / 10;
+	ah = ((a * t) + 5000) / 10000;
+	al = a - ah;
+
+	*th = t - ah;
+	*tl = t + al;
+
+	return 0;
+}
+
+void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
+{
+	int i;
+
+	if (enable) {
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+	} else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+
+		WREG32(CG_RLC_REQ_AND_RSP, 0x2);
+
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
+				break;
+			udelay(1);
+		}
+
+		WREG32(CG_RLC_REQ_AND_RSP, 0x0);
+
+		WREG32(GRBM_PWR_CNTL, 0x1);
+		RREG32(GRBM_PWR_CNTL);
+	}
+}
+
+void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	else
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+void r600_enable_acpi_pm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+}
+
+bool r600_dynamicpm_enabled(struct radeon_device *rdev)
+{
+	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
+		return true;
+	else
+		return false;
+}
+
+void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
+	else
+		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
+}
+
+void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
+	else
+		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
+}
+
+void r600_wait_for_spll_change(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+}
+
+void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
+{
+	WREG32(CG_BSP, BSP(p) | BSU(u));
+}
+
+void r600_set_at(struct radeon_device *rdev,
+		 u32 l_to_m, u32 m_to_h,
+		 u32 h_to_m, u32 m_to_l)
+{
+	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
+	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
+}
+
+void r600_set_tc(struct radeon_device *rdev,
+		 u32 index, u32 u_t, u32 d_t)
+{
+	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
+}
+
+void r600_select_td(struct radeon_device *rdev,
+		    enum r600_td td)
+{
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
+{
+	WREG32(CG_FTV, vrv);
+}
+
+void r600_set_tpu(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
+}
+
+void r600_set_tpc(struct radeon_device *rdev, u32 c)
+{
+	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
+}
+
+void r600_set_sstu(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
+}
+
+void r600_set_sst(struct radeon_device *rdev, u32 t)
+{
+	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
+}
+
+void r600_set_git(struct radeon_device *rdev, u32 t)
+{
+	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
+}
+
+void r600_set_fctu(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
+}
+
+void r600_set_fct(struct radeon_device *rdev, u32 t)
+{
+	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
+}
+
+void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
+{
+	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
+}
+
+void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
+{
+	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
+}
+
+void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
+}
+
+void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
+{
+	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
+}
+
+void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
+{
+	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
+}
+
+void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
+{
+	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
+}
+
+void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
+{
+	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
+}
+
+void r600_engine_clock_entry_enable(struct radeon_device *rdev,
+				    u32 index, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
+	else
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 0, ~STEP_0_SPLL_ENTRY_VALID);
+}
+
+void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
+						   u32 index, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
+	else
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 0, ~STEP_0_SPLL_STEP_ENABLE);
+}
+
+void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
+						 u32 index, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
+	else
+		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
+			 0, ~STEP_0_POST_DIV_EN);
+}
+
+void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
+					      u32 index, u32 divider)
+{
+	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
+		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
+}
+
+void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
+						   u32 index, u32 divider)
+{
+	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
+		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
+}
+
+void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
+						  u32 index, u32 divider)
+{
+	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
+		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
+}
+
+void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
+					   u32 index, u32 step_time)
+{
+	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
+		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
+}
+
+void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
+}
+
+void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
+{
+	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
+}
+
+void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
+{
+	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
+}
+
+void r600_voltage_control_enable_pins(struct radeon_device *rdev,
+				      u64 mask)
+{
+	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
+	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
+}
+
+void r600_voltage_control_program_voltages(struct radeon_device *rdev,
+					   enum r600_power_level index, u64 pins)
+{
+	u32 tmp, mask;
+	u32 ix = 3 - (3 & index);
+
+	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);
+
+	mask = 7 << (3 * ix);
+	tmp = RREG32(VID_UPPER_GPIO_CNTL);
+	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
+	WREG32(VID_UPPER_GPIO_CNTL, tmp);
+}
+
+void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
+						    u64 mask)
+{
+	u32 gpio;
+
+	gpio = RREG32(GPIOPAD_MASK);
+	gpio &= ~mask;
+	WREG32(GPIOPAD_MASK, gpio);
+
+	gpio = RREG32(GPIOPAD_EN);
+	gpio &= ~mask;
+	WREG32(GPIOPAD_EN, gpio);
+
+	gpio = RREG32(GPIOPAD_A);
+	gpio &= ~mask;
+	WREG32(GPIOPAD_A, gpio);
+}
+
+void r600_power_level_enable(struct radeon_device *rdev,
+			     enum r600_power_level index, bool enable)
+{
+	u32 ix = 3 - (3 & index);
+
+	if (enable)
+		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
+			 ~CTXSW_FREQ_STATE_ENABLE);
+	else
+		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
+			 ~CTXSW_FREQ_STATE_ENABLE);
+}
+
+void r600_power_level_set_voltage_index(struct radeon_device *rdev,
+					enum r600_power_level index, u32 voltage_index)
+{
+	u32 ix = 3 - (3 & index);
+
+	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
+		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
+}
+
+void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
+					  enum r600_power_level index, u32 mem_clock_index)
+{
+	u32 ix = 3 - (3 & index);
+
+	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
+		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
+}
+
+void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
+					  enum r600_power_level index, u32 eng_clock_index)
+{
+	u32 ix = 3 - (3 & index);
+
+	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
+		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
+}
+
+void r600_power_level_set_watermark_id(struct radeon_device *rdev,
+				       enum r600_power_level index,
+				       enum r600_display_watermark watermark_id)
+{
+	u32 ix = 3 - (3 & index);
+	u32 tmp = 0;
+
+	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
+		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
+	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
+}
+
+void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
+				    enum r600_power_level index, bool compatible)
+{
+	u32 ix = 3 - (3 & index);
+	u32 tmp = 0;
+
+	if (compatible)
+		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
+	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
+}
+
+enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
+	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
+	return tmp;
+}
+
+enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
+	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
+	return tmp;
+}
+
+void r600_power_level_set_enter_index(struct radeon_device *rdev,
+				      enum r600_power_level index)
+{
+	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
+		 ~DYN_PWR_ENTER_INDEX_MASK);
+}
+
+void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
+				       enum r600_power_level index)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (r600_power_level_get_target_index(rdev) != index)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (r600_power_level_get_current_index(rdev) != index)
+			break;
+		udelay(1);
+	}
+}
+
+void r600_wait_for_power_level(struct radeon_device *rdev,
+			       enum r600_power_level index)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (r600_power_level_get_target_index(rdev) == index)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (r600_power_level_get_current_index(rdev) == index)
+			break;
+		udelay(1);
+	}
+}
+
+void r600_start_dpm(struct radeon_device *rdev)
+{
+	r600_enable_sclk_control(rdev, false);
+	r600_enable_mclk_control(rdev, false);
+
+	r600_dynamicpm_enable(rdev, true);
+
+	radeon_wait_for_vblank(rdev, 0);
+	radeon_wait_for_vblank(rdev, 1);
+
+	r600_enable_spll_bypass(rdev, true);
+	r600_wait_for_spll_change(rdev);
+	r600_enable_spll_bypass(rdev, false);
+	r600_wait_for_spll_change(rdev);
+
+	r600_enable_spll_bypass(rdev, true);
+	r600_wait_for_spll_change(rdev);
+	r600_enable_spll_bypass(rdev, false);
+	r600_wait_for_spll_change(rdev);
+
+	r600_enable_sclk_control(rdev, true);
+	r600_enable_mclk_control(rdev, true);
+}
+
+void r600_stop_dpm(struct radeon_device *rdev)
+{
+	r600_dynamicpm_enable(rdev, false);
+}
+
+int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	return 0;
+}
+
+void r600_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+
+}
+
+bool r600_is_uvd_state(u32 class, u32 class2)
+{
+	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+		return true;
+	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+		return true;
+	return false;
+}
+
+static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
+					      int min_temp, int max_temp)
+{
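+	/* temperatures are tracked in millidegrees C; the register
+	 * fields take whole degrees, hence the divisions by 1000 below
+	 */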
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
+{
+	switch (sensor) {
+	case THERMAL_TYPE_RV6XX:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_SUMO:
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_CI:
+	case THERMAL_TYPE_KV:
+		return true;
+	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		return false; /* need special handling */
+	case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_EXTERNAL:
+	case THERMAL_TYPE_EXTERNAL_GPIO:
+	default:
+		return false;
+	}
+}
+
+int r600_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret)
+			return ret;
+		rdev->irq.dpm_thermal = true;
+		radeon_irq_set(rdev);
+	}
+
+	return 0;
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+	struct _ATOM_PPLIB_FANTABLE fan;
+	struct _ATOM_PPLIB_FANTABLE2 fan2;
+	struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
+					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
+{
+	u32 size = atom_table->ucNumEntries *
+		sizeof(struct radeon_clock_voltage_dependency_entry);
+	int i;
+	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
+
+	radeon_table->entries = kzalloc(size, GFP_KERNEL);
+	if (!radeon_table->entries)
+		return -ENOMEM;
+
+	entry = &atom_table->entries[0];
+	for (i = 0; i < atom_table->ucNumEntries; i++) {
+		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+			(entry->ucClockHigh << 16);
+		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
+	}
+	radeon_table->count = atom_table->ucNumEntries;
+
+	return 0;
+}
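+
+/*
+ * Example of the packed clock encoding above (ATOM clocks are
+ * customarily in 10 kHz units): usClockLow = 0x1A80 with
+ * ucClockHigh = 0x01 gives clk = 0x11A80 = 72320, i.e. 723.2 MHz.
+ */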
+
+int r600_get_platform_caps(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+	return 0;
+}
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+int r600_parse_extended_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	union power_info *power_info;
+	union fan_info *fan_info;
+	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	int ret, i;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* fan table */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+		if (power_info->pplib3.usFanTableOffset) {
+			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
+						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
+			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
+			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
+			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
+			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
+			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
+			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
+			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
+			if (fan_info->fan.ucFanTableFormat >= 2)
+				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
+			else
+				rdev->pm.dpm.fan.t_max = 10900;
+			rdev->pm.dpm.fan.cycle_delay = 100000;
+			if (fan_info->fan.ucFanTableFormat >= 3) {
+				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+				rdev->pm.dpm.fan.default_max_fan_pwm =
+					le16_to_cpu(fan_info->fan3.usFanPWMMax);
+				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+				rdev->pm.dpm.fan.fan_output_sensitivity =
+					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+			}
+			rdev->pm.dpm.fan.ucode_fan_control = true;
+		}
+	}
+
+	/* clock dependency tables, shedding tables */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
+		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							       dep_table);
+			if (ret)
+				return ret;
+		}
+		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							       dep_table);
+			if (ret) {
+				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							       dep_table);
+			if (ret) {
+				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+							       dep_table);
+			if (ret) {
+				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
+			if (clk_v->ucNumEntries) {
+				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
+					le16_to_cpu(clk_v->entries[0].usSclkLow) |
+					(clk_v->entries[0].ucSclkHigh << 16);
+				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
+					le16_to_cpu(clk_v->entries[0].usMclkLow) |
+					(clk_v->entries[0].ucMclkHigh << 16);
+				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
+					le16_to_cpu(clk_v->entries[0].usVddc);
+				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
+					le16_to_cpu(clk_v->entries[0].usVddci);
+			}
+		}
+		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
+			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
+				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
+
+			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
+				kcalloc(psl->ucNumEntries,
+					sizeof(struct radeon_phase_shedding_limits_entry),
+					GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+
+			entry = &psl->entries[0];
+			for (i = 0; i < psl->ucNumEntries; i++) {
+				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
+					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
+				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
+					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
+				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
+			}
+			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
+				psl->ucNumEntries;
+		}
+	}
+
+	/* cac data */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
+		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
+		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
+		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
+		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
+		if (rdev->pm.dpm.tdp_od_limit)
+			rdev->pm.dpm.power_control = true;
+		else
+			rdev->pm.dpm.power_control = false;
+		rdev->pm.dpm.tdp_adjustment = 0;
+		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
+		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
+		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
+		if (power_info->pplib5.usCACLeakageTableOffset) {
+			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
+				(ATOM_PPLIB_CAC_Leakage_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+			ATOM_PPLIB_CAC_Leakage_Record *entry;
+			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
+			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			entry = &cac_table->entries[0];
+			for (i = 0; i < cac_table->ucNumEntries; i++) {
+				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+						le16_to_cpu(entry->usVddc1);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+						le16_to_cpu(entry->usVddc2);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+						le16_to_cpu(entry->usVddc3);
+				} else {
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+						le16_to_cpu(entry->usVddc);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+						le32_to_cpu(entry->ulLeakageValue);
+				}
+				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
+			}
+			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
+		}
+	}
+
+	/* ext tables */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+			ext_hdr->usVCETableOffset) {
+			VCEClockInfoArray *array = (VCEClockInfoArray *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+			ATOM_PPLIB_VCE_State_Table *states =
+				(ATOM_PPLIB_VCE_State_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
+				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
+			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+			ATOM_PPLIB_VCE_State_Record *state_entry;
+			VCEClockInfo *vce_clk;
+			u32 size = limits->numEntries *
+				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
+			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			state_entry = &states->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				vce_clk = (VCEClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+			}
+			for (i = 0; i < states->numEntries; i++) {
+				if (i >= RADEON_MAX_VCE_LEVELS)
+					break;
+				vce_clk = (VCEClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+				rdev->pm.dpm.vce_states[i].evclk =
+					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+				rdev->pm.dpm.vce_states[i].ecclk =
+					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+				rdev->pm.dpm.vce_states[i].clk_idx =
+					state_entry->ucClockInfoIndex & 0x3f;
+				rdev->pm.dpm.vce_states[i].pstate =
+					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
+				state_entry = (ATOM_PPLIB_VCE_State_Record *)
+					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+			ext_hdr->usUVDTableOffset) {
+			UVDClockInfoArray *array = (UVDClockInfoArray *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
+			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
+			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				UVDClockInfo *uvd_clk = (UVDClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+			ext_hdr->usSAMUTableOffset) {
+			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct radeon_clock_voltage_dependency_entry);
+			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
+		    ext_hdr->usPPMTableOffset) {
+			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usPPMTableOffset));
+			rdev->pm.dpm.dyn_state.ppm_table =
+				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.ppm_table) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+				le16_to_cpu(ppm->usCpuCoreNumber);
+			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
+				le32_to_cpu(ppm->ulPlatformTDP);
+			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
+				le32_to_cpu(ppm->ulSmallACPlatformTDP);
+			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
+				le32_to_cpu(ppm->ulPlatformTDC);
+			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
+				le32_to_cpu(ppm->ulSmallACPlatformTDC);
+			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
+				le32_to_cpu(ppm->ulApuTDP);
+			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
+				le32_to_cpu(ppm->ulDGpuTDP);
+			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
+				le32_to_cpu(ppm->ulDGpuUlvPower);
+			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
+				le32_to_cpu(ppm->ulTjmax);
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+			ext_hdr->usACPTableOffset) {
+			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct radeon_clock_voltage_dependency_entry);
+			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+			ext_hdr->usPowerTuneTableOffset) {
+			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+			ATOM_PowerTune_Table *pt;
+			rdev->pm.dpm.dyn_state.cac_tdp_table =
+				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
+			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
+				r600_free_extended_power_table(rdev);
+				return -ENOMEM;
+			}
+			if (rev > 0) {
+				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
+				pt = &ppt->power_tune_table;
+			} else {
+				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+				pt = &ppt->power_tune_table;
+			}
+			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+				le16_to_cpu(pt->usConfigurableTDP);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+				le16_to_cpu(pt->usBatteryPowerLimit);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+				le16_to_cpu(pt->usSmallPowerLimit);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+				le16_to_cpu(pt->usLowCACLeakage);
+			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+				le16_to_cpu(pt->usHighCACLeakage);
+		}
+	}
+
+	return 0;
+}
+
+void r600_free_extended_power_table(struct radeon_device *rdev)
+{
+	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+	kfree(dyn_state->vddc_dependency_on_sclk.entries);
+	kfree(dyn_state->vddci_dependency_on_mclk.entries);
+	kfree(dyn_state->vddc_dependency_on_mclk.entries);
+	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+	kfree(dyn_state->cac_leakage_table.entries);
+	kfree(dyn_state->phase_shedding_limits_table.entries);
+	kfree(dyn_state->ppm_table);
+	kfree(dyn_state->cac_tdp_table);
+	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
+}
+
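+/*
+ * Select the PCIE gen to use: an ASIC-mandated gen always wins; for any
+ * other asic_gen value the requested default is honoured only when the
+ * system mask reports the matching link speed (e.g. a GEN3 default on a
+ * system whose sys_mask lacks RADEON_PCIE_SPEED_80 falls back to GEN1).
+ */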
+enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
+					       u32 sys_mask,
+					       enum radeon_pcie_gen asic_gen,
+					       enum radeon_pcie_gen default_gen)
+{
+	switch (asic_gen) {
+	case RADEON_PCIE_GEN1:
+		return RADEON_PCIE_GEN1;
+	case RADEON_PCIE_GEN2:
+		return RADEON_PCIE_GEN2;
+	case RADEON_PCIE_GEN3:
+		return RADEON_PCIE_GEN3;
+	default:
+		if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
+			return RADEON_PCIE_GEN3;
+		else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
+			return RADEON_PCIE_GEN2;
+		else
+			return RADEON_PCIE_GEN1;
+	}
+}
+
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+			       u16 asic_lanes,
+			       u16 default_lanes)
+{
+	switch (asic_lanes) {
+	case 0:
+	default:
+		return default_lanes;
+	case 1:
+		return 1;
+	case 2:
+		return 2;
+	case 4:
+		return 4;
+	case 8:
+		return 8;
+	case 12:
+		return 12;
+	case 16:
+		return 16;
+	}
+}
+
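+/*
+ * Encode a lane count into the value expected by the PCIE link width
+ * registers; only 1/2/4/8/12/16 lanes have valid encodings in the
+ * table below (e.g. 8 lanes encodes to 4), anything else yields 0.
+ */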
+u8 r600_encode_pci_lane_width(u32 lanes)
+{
+	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+
+	if (lanes > 16)
+		return 0;
+
+	return encoded_lanes[lanes];
+}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
new file mode 100644
index 0000000..bd499d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT                                10000
+#define R600_BSP_DFLT                                0x41EB
+#define R600_BSU_DFLT                                0x2
+#define R600_AH_DFLT                                 5
+#define R600_RLP_DFLT                                25
+#define R600_RMP_DFLT                                65
+#define R600_LHP_DFLT                                40
+#define R600_LMP_DFLT                                15
+#define R600_TD_DFLT                                 0
+#define R600_UTC_DFLT_00                             0x24
+#define R600_UTC_DFLT_01                             0x22
+#define R600_UTC_DFLT_02                             0x22
+#define R600_UTC_DFLT_03                             0x22
+#define R600_UTC_DFLT_04                             0x22
+#define R600_UTC_DFLT_05                             0x22
+#define R600_UTC_DFLT_06                             0x22
+#define R600_UTC_DFLT_07                             0x22
+#define R600_UTC_DFLT_08                             0x22
+#define R600_UTC_DFLT_09                             0x22
+#define R600_UTC_DFLT_10                             0x22
+#define R600_UTC_DFLT_11                             0x22
+#define R600_UTC_DFLT_12                             0x22
+#define R600_UTC_DFLT_13                             0x22
+#define R600_UTC_DFLT_14                             0x22
+#define R600_DTC_DFLT_00                             0x24
+#define R600_DTC_DFLT_01                             0x22
+#define R600_DTC_DFLT_02                             0x22
+#define R600_DTC_DFLT_03                             0x22
+#define R600_DTC_DFLT_04                             0x22
+#define R600_DTC_DFLT_05                             0x22
+#define R600_DTC_DFLT_06                             0x22
+#define R600_DTC_DFLT_07                             0x22
+#define R600_DTC_DFLT_08                             0x22
+#define R600_DTC_DFLT_09                             0x22
+#define R600_DTC_DFLT_10                             0x22
+#define R600_DTC_DFLT_11                             0x22
+#define R600_DTC_DFLT_12                             0x22
+#define R600_DTC_DFLT_13                             0x22
+#define R600_DTC_DFLT_14                             0x22
+#define R600_VRC_DFLT                                0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT                1000
+#define R600_BACKBIASRESPONSETIME_DFLT               1000
+#define R600_VRU_DFLT                                0x3
+#define R600_SPLLSTEPTIME_DFLT                       0x1000
+#define R600_SPLLSTEPUNIT_DFLT                       0x3
+#define R600_TPU_DFLT                                0
+#define R600_TPC_DFLT                                0x200
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+#define R600_GICST_DFLT                              0x200
+#define R600_FCT_DFLT                                0x0400
+#define R600_FCTU_DFLT                               0
+#define R600_CTXCGTT3DRPHC_DFLT                      0x20
+#define R600_CTXCGTT3DRSDC_DFLT                      0x40
+#define R600_VDDC3DOORPHC_DFLT                       0x100
+#define R600_VDDC3DOORSDC_DFLT                       0x7
+#define R600_VDDC3DOORSU_DFLT                        0
+#define R600_MPLLLOCKTIME_DFLT                       100
+#define R600_MPLLRESETTIME_DFLT                      150
+#define R600_VCOSTEPPCT_DFLT                         20
+#define R600_ENDINGVCOSTEPPCT_DFLT                   5
+#define R600_REFERENCEDIVIDER_DFLT                   4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+	R600_POWER_LEVEL_LOW = 0,
+	R600_POWER_LEVEL_MEDIUM = 1,
+	R600_POWER_LEVEL_HIGH = 2,
+	R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+	R600_TD_AUTO,
+	R600_TD_UP,
+	R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+	R600_DISPLAY_WATERMARK_LOW = 0,
+	R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+	R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+	R600_PM_DISPLAY_GAP_VBLANK       = 1,
+	R600_PM_DISPLAY_GAP_WATERMARK    = 2,
+	R600_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+extern const u32 r600_utc[R600_PM_NUMBER_OF_TC];
+extern const u32 r600_dtc[R600_PM_NUMBER_OF_TC];
+
+void r600_dpm_print_class_info(u32 class, u32 class2);
+void r600_dpm_print_cap_info(u32 caps);
+void r600_dpm_print_ps_status(struct radeon_device *rdev,
+			      struct radeon_ps *rps);
+u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
+bool r600_is_uvd_state(u32 class, u32 class2);
+void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			    u32 *p, u32 *u);
+int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
+void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable);
+void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable);
+void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable);
+void r600_enable_acpi_pm(struct radeon_device *rdev);
+void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable);
+bool r600_dynamicpm_enabled(struct radeon_device *rdev);
+void r600_enable_sclk_control(struct radeon_device *rdev, bool enable);
+void r600_enable_mclk_control(struct radeon_device *rdev, bool enable);
+void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable);
+void r600_wait_for_spll_change(struct radeon_device *rdev);
+void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p);
+void r600_set_at(struct radeon_device *rdev,
+		 u32 l_to_m, u32 m_to_h,
+		 u32 h_to_m, u32 m_to_l);
+void r600_set_tc(struct radeon_device *rdev, u32 index, u32 u_t, u32 d_t);
+void r600_select_td(struct radeon_device *rdev, enum r600_td td);
+void r600_set_vrc(struct radeon_device *rdev, u32 vrv);
+void r600_set_tpu(struct radeon_device *rdev, u32 u);
+void r600_set_tpc(struct radeon_device *rdev, u32 c);
+void r600_set_sstu(struct radeon_device *rdev, u32 u);
+void r600_set_sst(struct radeon_device *rdev, u32 t);
+void r600_set_git(struct radeon_device *rdev, u32 t);
+void r600_set_fctu(struct radeon_device *rdev, u32 u);
+void r600_set_fct(struct radeon_device *rdev, u32 t);
+void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p);
+void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s);
+void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u);
+void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p);
+void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s);
+void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time);
+void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time);
+void r600_engine_clock_entry_enable(struct radeon_device *rdev,
+				    u32 index, bool enable);
+void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
+						   u32 index, bool enable);
+void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
+						 u32 index, bool enable);
+void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
+					      u32 index, u32 divider);
+void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
+						   u32 index, u32 divider);
+void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
+						  u32 index, u32 divider);
+void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
+					   u32 index, u32 step_time);
+void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u);
+void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u);
+void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt);
+void r600_voltage_control_enable_pins(struct radeon_device *rdev,
+				      u64 mask);
+void r600_voltage_control_program_voltages(struct radeon_device *rdev,
+					   enum r600_power_level index, u64 pins);
+void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
+						    u64 mask);
+void r600_power_level_enable(struct radeon_device *rdev,
+			     enum r600_power_level index, bool enable);
+void r600_power_level_set_voltage_index(struct radeon_device *rdev,
+					enum r600_power_level index, u32 voltage_index);
+void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
+					  enum r600_power_level index, u32 mem_clock_index);
+void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
+					  enum r600_power_level index, u32 eng_clock_index);
+void r600_power_level_set_watermark_id(struct radeon_device *rdev,
+				       enum r600_power_level index,
+				       enum r600_display_watermark watermark_id);
+void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
+				    enum r600_power_level index, bool compatible);
+enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev);
+enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev);
+void r600_power_level_set_enter_index(struct radeon_device *rdev,
+				      enum r600_power_level index);
+void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
+				       enum r600_power_level index);
+void r600_wait_for_power_level(struct radeon_device *rdev,
+			       enum r600_power_level index);
+void r600_start_dpm(struct radeon_device *rdev);
+void r600_stop_dpm(struct radeon_device *rdev);
+
+bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
+
+int r600_get_platform_caps(struct radeon_device *rdev);
+
+int r600_parse_extended_power_table(struct radeon_device *rdev);
+void r600_free_extended_power_table(struct radeon_device *rdev);
+
+enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
+					       u32 sys_mask,
+					       enum radeon_pcie_gen asic_gen,
+					       enum radeon_pcie_gen default_gen);
+
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+			       u16 asic_lanes,
+			       u16 default_lanes);
+u8 r600_encode_pci_lane_width(u32 lanes);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 0000000..ab32830
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <linux/hdmi.h>
+#include <linux/gcd.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "r600d.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+	RGB = 0,
+	YCC_422 = 1,
+	YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+	AUDIO_STATUS_DIG_ENABLE   = 0x01,
+	AUDIO_STATUS_V            = 0x02,
+	AUDIO_STATUS_VCFG         = 0x04,
+	AUDIO_STATUS_EMPHASIS     = 0x08,
+	AUDIO_STATUS_COPYRIGHT    = 0x10,
+	AUDIO_STATUS_NONAUDIO     = 0x20,
+	AUDIO_STATUS_PROFESSIONAL = 0x40,
+	AUDIO_STATUS_LEVEL        = 0x80
+};
+
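+/*
+ * Snapshot the parameters of the audio stream currently being played
+ * (channel count, sample size, rate, IEC60958 status bytes) from the
+ * codec registers so the HDMI packets can be kept in sync with it.
+ */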
+static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
+{
+	struct r600_audio_pin status = {};
+	uint32_t value;
+
+	value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+
+	/* number of channels */
+	status.channels = (value & 0x7) + 1;
+
+	/* bits per sample */
+	switch ((value & 0xF0) >> 4) {
+	case 0x0:
+		status.bits_per_sample = 8;
+		break;
+	case 0x1:
+		status.bits_per_sample = 16;
+		break;
+	case 0x2:
+		status.bits_per_sample = 20;
+		break;
+	case 0x3:
+		status.bits_per_sample = 24;
+		break;
+	case 0x4:
+		status.bits_per_sample = 32;
+		break;
+	default:
+		dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+			(int)value);
+		status.bits_per_sample = 16;
+	}
+
+	/* current sampling rate in Hz: a 44.1 or 48 kHz base (bit 14),
+	 * scaled by the rate multiplier and divider fields */
+	if (value & 0x4000)
+		status.rate = 44100;
+	else
+		status.rate = 48000;
+	status.rate *= ((value >> 11) & 0x7) + 1;
+	status.rate /= ((value >> 8) & 0x7) + 1;
+
+	value = RREG32(R600_AUDIO_STATUS_BITS);
+
+	/* iec 60958 status bits */
+	status.status_bits = value & 0xff;
+
+	/* iec 60958 category code */
+	status.category_code = (value >> 8) & 0xff;
+
+	return status;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+void r600_audio_update_hdmi(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  audio_work);
+	struct drm_device *dev = rdev->ddev;
+	struct r600_audio_pin audio_status = r600_audio_status(rdev);
+	struct drm_encoder *encoder;
+	bool changed = false;
+
+	if (rdev->audio.pin[0].channels != audio_status.channels ||
+	    rdev->audio.pin[0].rate != audio_status.rate ||
+	    rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
+	    rdev->audio.pin[0].status_bits != audio_status.status_bits ||
+	    rdev->audio.pin[0].category_code != audio_status.category_code) {
+		rdev->audio.pin[0] = audio_status;
+		changed = true;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (!radeon_encoder_is_digital(encoder))
+			continue;
+		if (changed || r600_hdmi_buffer_status_changed(encoder))
+			r600_hdmi_update_audio_settings(encoder);
+	}
+}
+
+/* enable the audio stream: bits 0-3 of enable_mask enable pins 0-3;
+ * a zero mask disables audio entirely */
+void r600_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       u8 enable_mask)
+{
+	u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
+
+	if (!pin)
+		return;
+
+	if (enable_mask) {
+		tmp |= AUDIO_ENABLED;
+		if (enable_mask & 1)
+			tmp |= PIN0_AUDIO_ENABLED;
+		if (enable_mask & 2)
+			tmp |= PIN1_AUDIO_ENABLED;
+		if (enable_mask & 4)
+			tmp |= PIN2_AUDIO_ENABLED;
+		if (enable_mask & 8)
+			tmp |= PIN3_AUDIO_ENABLED;
+	} else {
+		tmp &= ~(AUDIO_ENABLED |
+			 PIN0_AUDIO_ENABLED |
+			 PIN1_AUDIO_ENABLED |
+			 PIN2_AUDIO_ENABLED |
+			 PIN3_AUDIO_ENABLED);
+	}
+
+	WREG32(AZ_HOT_PLUG_CONTROL, tmp);
+}
+
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
+{
+	/* only one pin on 6xx-NI */
+	return &rdev->audio.pin[0];
+}
+
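+/*
+ * Program the Audio Clock Regeneration packet values.  Per the HDMI
+ * spec the sink recovers the audio clock as 128*fs = f_TMDS * N / CTS,
+ * so an N/CTS pair is programmed for each of the three base sample
+ * rates (32, 44.1 and 48 kHz) and the pair matching the stream is used.
+ */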
+void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* DCE 3.0 uses a register that's normally for CRC_CONTROL */
+	uint32_t acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
+				       HDMI0_ACR_PACKET_CONTROL;
+	WREG32_P(acr_ctl + offset,
+		HDMI0_ACR_SOURCE |		/* select SW CTS value */
+		HDMI0_ACR_AUTO_SEND,	/* allow hw to send ACR packets when required */
+		~(HDMI0_ACR_SOURCE |
+		HDMI0_ACR_AUTO_SEND));
+
+	WREG32_P(HDMI0_ACR_32_0 + offset,
+		HDMI0_ACR_CTS_32(acr->cts_32khz),
+		~HDMI0_ACR_CTS_32_MASK);
+	WREG32_P(HDMI0_ACR_32_1 + offset,
+		HDMI0_ACR_N_32(acr->n_32khz),
+		~HDMI0_ACR_N_32_MASK);
+
+	WREG32_P(HDMI0_ACR_44_0 + offset,
+		HDMI0_ACR_CTS_44(acr->cts_44_1khz),
+		~HDMI0_ACR_CTS_44_MASK);
+	WREG32_P(HDMI0_ACR_44_1 + offset,
+		HDMI0_ACR_N_44(acr->n_44_1khz),
+		~HDMI0_ACR_N_44_MASK);
+
+	WREG32_P(HDMI0_ACR_48_0 + offset,
+		HDMI0_ACR_CTS_48(acr->cts_48khz),
+		~HDMI0_ACR_CTS_48_MASK);
+	WREG32_P(HDMI0_ACR_48_1 + offset,
+		HDMI0_ACR_N_48(acr->n_48khz),
+		~HDMI0_ACR_N_48_MASK);
+}
+
+/*
+ * build an HDMI Video InfoFrame
+ */
+void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
+			 unsigned char *buffer, size_t size)
+{
+	uint8_t *frame = buffer + 3;
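+	/* the packed infoframe is 3 header bytes, a checksum and then the
+	 * payload, so frame starts at the checksum; the version byte
+	 * (buffer[1]) is stuffed into the top byte of AVI_INFO3 below */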
+
+	WREG32(HDMI0_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(HDMI0_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(HDMI0_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+		  HDMI0_AVI_INFO_LINE(2));	/* anything other than 0 */
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+		  HDMI0_AVI_INFO_SEND |	/* enable AVI info frames */
+		  HDMI0_AVI_INFO_CONT);	/* send AVI info frames every frame/field */
+
+}
+
+/*
+ * build an Audio InfoFrame
+ */
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+					     const void *buffer, size_t size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	const u8 *frame = buffer + 3;
+
+	WREG32(HDMI0_AUDIO_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AUDIO_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
+}
+
+/*
+ * test if the audio buffer is filled enough to start playing
+ */
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
+}
+
+/*
+ * has the buffer status changed since the last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	int status, result;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return 0;
+
+	status = r600_hdmi_is_audio_buffer_filled(encoder);
+	result = dig->afmt->last_buffer_filled_status != status;
+	dig->afmt->last_buffer_filled_status = status;
+
+	return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	bool hdmi_audio_workaround = false; /* FIXME */
+	u32 value;
+
+	if (!hdmi_audio_workaround ||
+	    r600_hdmi_is_audio_buffer_filled(encoder))
+		value = 0; /* disable workaround */
+	else
+		value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		 value, ~HDMI0_AUDIO_TEST_EN);
+}
+
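+/*
+ * Set up the DCCG audio DTO for the given mode clock.  The phase/module
+ * pair encodes the ratio of 24 MHz to the pixel clock, so the DTO output
+ * used as the audio reference stays at 24 MHz whatever the display mode;
+ * DTO0 serves DIG encoder 0 and DTO1 the other one.
+ */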
+void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
+			     struct radeon_crtc *crtc, unsigned int clock)
+{
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!crtc)
+		return;
+
+	radeon_encoder = to_radeon_encoder(crtc->encoder);
+	dig = radeon_encoder->enc_priv;
+
+	if (!dig)
+		return;
+
+	if (dig->dig_encoder == 0) {
+		WREG32(DCCG_AUDIO_DTO0_PHASE, 24000 * 100);
+		WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+	} else {
+		WREG32(DCCG_AUDIO_DTO1_PHASE, 24000 * 100);
+		WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+	}
+}
+
+void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
+		HDMI0_NULL_SEND |	/* send null packets when required */
+		HDMI0_GC_SEND |		/* send general control packets */
+		HDMI0_GC_CONT);		/* send general control packets every frame */
+}
+
+void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		HDMI0_AUDIO_SAMPLE_SEND |			/* send audio packets */
+		HDMI0_AUDIO_DELAY_EN(1) |			/* default audio delay */
+		HDMI0_AUDIO_PACKETS_PER_LINE(3) |	/* should be sufficient for all audio modes and small enough for all hblanks */
+		HDMI0_60958_CS_UPDATE,				/* allow 60958 channel status fields to be updated */
+		~(HDMI0_AUDIO_SAMPLE_SEND |
+		HDMI0_AUDIO_DELAY_EN_MASK |
+		HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
+		HDMI0_60958_CS_UPDATE));
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+		HDMI0_AUDIO_INFO_SEND |		/* enable audio info frames (frames won't be set until audio is enabled) */
+		HDMI0_AUDIO_INFO_UPDATE);	/* required for audio info values to be updated */
+
+	WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
+		HDMI0_AUDIO_INFO_LINE(2),	/* anything other than 0 */
+		~HDMI0_AUDIO_INFO_LINE_MASK);
+
+	WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
+		~(HDMI0_GENERIC0_SEND |
+		HDMI0_GENERIC0_CONT |
+		HDMI0_GENERIC0_UPDATE |
+		HDMI0_GENERIC1_SEND |
+		HDMI0_GENERIC1_CONT |
+		HDMI0_GENERIC0_LINE_MASK |
+		HDMI0_GENERIC1_LINE_MASK));
+
+	WREG32_P(HDMI0_60958_0 + offset,
+		HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
+		~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
+		HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
+
+	WREG32_P(HDMI0_60958_1 + offset,
+		HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
+		~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
+}
+
+void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (mute)
+		WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
+	else
+		WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
+}
+
+/**
+ * r600_hdmi_update_audio_settings - Update audio infoframe
+ *
+ * @encoder: drm encoder
+ *
+ * Gets info about current audio stream and updates audio infoframe.
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct r600_audio_pin audio = r600_audio_status(rdev);
+	uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+	struct hdmi_audio_infoframe frame;
+	uint32_t offset;
+	uint32_t value;
+	ssize_t err;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample\n",
+		 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+		  audio.channels, audio.rate, audio.bits_per_sample);
+	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+		  (int)audio.status_bits, (int)audio.category_code);
+
+	err = hdmi_audio_infoframe_init(&frame);
+	if (err < 0) {
+		DRM_ERROR("failed to setup audio infoframe\n");
+		return;
+	}
+
+	frame.channels = audio.channels;
+
+	err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack audio infoframe\n");
+		return;
+	}
+
+	value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
+	if (value & HDMI0_AUDIO_TEST_EN)
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       value & ~HDMI0_AUDIO_TEST_EN);
+
+	WREG32_OR(HDMI0_CONTROL + offset,
+		  HDMI0_ERROR_ACK);
+
+	WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
+		   ~HDMI0_AUDIO_INFO_SOURCE);
+
+	r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
+
+	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+		  HDMI0_AUDIO_INFO_CONT |
+		  HDMI0_AUDIO_INFO_UPDATE);
+}
+
+/*
+ * enable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 hdmi = HDMI0_ERROR_ACK;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Older chipsets require setting HDMI and routing manually */
+	if (!ASIC_IS_DCE3(rdev)) {
+		if (enable)
+			hdmi |= HDMI0_ENABLE;
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			if (enable) {
+				WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+			} else {
+				WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			if (enable) {
+				WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+			} else {
+				WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			if (enable) {
+				WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+			} else {
+				WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+			if (enable)
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+			break;
+		default:
+			dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+				radeon_encoder->encoder_id);
+			break;
+		}
+		WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
+	}
+
+	if (rdev->irq.installed) {
+		/* if irq is available use it */
+		/* XXX: shouldn't need this on any asics.  Double check DCE2/3 */
+		if (enable)
+			radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+		else
+			radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+	}
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
+
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 0000000..3ef2026
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#define R600_PCIE_PORT_INDEX                0x0038
+#define R600_PCIE_PORT_DATA                 0x003c
+
+#define R600_RCU_INDEX                      0x0100
+#define R600_RCU_DATA                       0x0104
+
+#define R600_UVD_CTX_INDEX                  0xf4a0
+#define R600_UVD_CTX_DATA                   0xf4a4
+
+#define R600_MC_VM_FB_LOCATION			0x2180
+#define		R600_MC_FB_BASE_MASK			0x0000FFFF
+#define		R600_MC_FB_BASE_SHIFT			0
+#define		R600_MC_FB_TOP_MASK			0xFFFF0000
+#define		R600_MC_FB_TOP_SHIFT			16
+#define R600_MC_VM_AGP_TOP			0x2184
+#define		R600_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R600_MC_AGP_TOP_SHIFT			0
+#define R600_MC_VM_AGP_BOT			0x2188
+#define		R600_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R600_MC_AGP_BOT_SHIFT			0
+#define R600_MC_VM_AGP_BASE			0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2190
+#define		R600_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R600_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x2198
+
+#define R700_MC_VM_FB_LOCATION			0x2024
+#define		R700_MC_FB_BASE_MASK			0x0000FFFF
+#define		R700_MC_FB_BASE_SHIFT			0
+#define		R700_MC_FB_TOP_MASK			0xFFFF0000
+#define		R700_MC_FB_TOP_SHIFT			16
+#define R700_MC_VM_AGP_TOP			0x2028
+#define		R700_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R700_MC_AGP_TOP_SHIFT			0
+#define R700_MC_VM_AGP_BOT			0x202c
+#define		R700_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R700_MC_AGP_BOT_SHIFT			0
+#define R700_MC_VM_AGP_BASE			0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2034
+#define		R700_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R700_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x203c
+
+#define R600_RAMCFG				       0x2408
+#       define R600_CHANSIZE                           (1 << 7)
+#       define R600_CHANSIZE_OVERRIDE                  (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT                                        0x618
+#	define R600_OPEN_DRAIN_PADS				   (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE                                     0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL                             0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL                              0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define R600_HDP_NONSURFACE_BASE                                0x2c04
+
+#define R600_BUS_CNTL                                           0x5420
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+#define R600_CONFIG_CNTL                                        0x5424
+#define R600_CONFIG_MEMSIZE                                     0x5428
+#define R600_CONFIG_F0_BASE                                     0x542C
+#define R600_CONFIG_APER_SIZE                                   0x5430
+
+#define	R600_BIF_FB_EN						0x5490
+#define		R600_FB_READ_EN					(1 << 0)
+#define		R600_FB_WRITE_EN				(1 << 1)
+
+#define R600_CITF_CNTL           				0x200c
+#define		R600_BLACKOUT_MASK				0x00000003
+
+#define R700_MC_CITF_CNTL           				0x25c0
+
+#define R600_ROM_CNTL                              0x1600
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL                     0x600
+#       define R600_SPLL_BYPASS_EN                 (1 << 3)
+#define R600_CG_SPLL_STATUS                        0x60c
+#       define R600_SPLL_CHG_STATUS                (1 << 1)
+
+#define R600_BIOS_0_SCRATCH               0x1724
+#define R600_BIOS_1_SCRATCH               0x1728
+#define R600_BIOS_2_SCRATCH               0x172c
+#define R600_BIOS_3_SCRATCH               0x1730
+#define R600_BIOS_4_SCRATCH               0x1734
+#define R600_BIOS_5_SCRATCH               0x1738
+#define R600_BIOS_6_SCRATCH               0x173c
+#define R600_BIOS_7_SCRATCH               0x1740
+
+/* Audio: these registers were reverse engineered,
+ * so there is a good chance the naming is wrong.
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL               0x0514
+#define R600_AUDIO_PLL1_DIV               0x0518
+#define R600_AUDIO_PLL2_MUL               0x0524
+#define R600_AUDIO_PLL2_DIV               0x0528
+#define R600_AUDIO_CLK_SRCSEL             0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE                 0x7300
+#define R600_AUDIO_TIMING                 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID              0x7380
+#define R600_AUDIO_REVISION_ID            0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT        0x7388
+#define R600_AUDIO_NID1_NODE_COUNT        0x738c
+#define R600_AUDIO_NID1_TYPE              0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE    0x7394
+#define R600_AUDIO_SUPPORTED_CODEC        0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS              0x73a0
+#define R600_AUDIO_NID3_CAPS              0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS          0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN          0x73ac
+#define R600_AUDIO_CONN_LIST              0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL       0x73c0
+#define R600_AUDIO_PLAYING                0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID      0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT         0x73cc
+#define R600_AUDIO_PIN_SENSE              0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL        0x73d4
+#define R600_AUDIO_STATUS_BITS            0x73d8
+
+#define DCE2_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1		(0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1		(0x7800 - 0x7400)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
new file mode 100644
index 0000000..2e00a52
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -0,0 +1,2370 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R600D_H
+#define R600D_H
+
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define R6XX_MAX_SH_GPRS			256
+#define R6XX_MAX_TEMP_GPRS			16
+#define R6XX_MAX_SH_THREADS			256
+#define R6XX_MAX_SH_STACK_ENTRIES		4096
+#define R6XX_MAX_BACKENDS			8
+#define R6XX_MAX_BACKENDS_MASK			0xff
+#define R6XX_MAX_SIMDS				8
+#define R6XX_MAX_SIMDS_MASK			0xff
+#define R6XX_MAX_PIPES				8
+#define R6XX_MAX_PIPES_MASK			0xff
+
+/* tiling bits */
+#define     ARRAY_LINEAR_GENERAL              0x00000000
+#define     ARRAY_LINEAR_ALIGNED              0x00000001
+#define     ARRAY_1D_TILED_THIN1              0x00000002
+#define     ARRAY_2D_TILED_THIN1              0x00000004
+
+/* Registers */
+#define	ARB_POP						0x2418
+#define 	ENABLE_TC128					(1 << 30)
+#define	ARB_GDEC_RD_CNTL				0x246C
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+
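+/* Naming convention for the macros below: R_ is the register offset,
+ * S_xxx(x) shifts a value into its field, G_xxx(x) extracts the field,
+ * C_xxx is the AND-mask that clears it and V_ are named field values,
+ * e.g. (reg & C_028808_SPECIAL_OP) | S_028808_SPECIAL_OP(V_028808_SPECIAL_RESOLVE_BOX)
+ */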
+#define R_028808_CB_COLOR_CONTROL			0x28808
+#define   S_028808_SPECIAL_OP(x)                       (((x) & 0x7) << 4)
+#define   G_028808_SPECIAL_OP(x)                       (((x) >> 4) & 0x7)
+#define   C_028808_SPECIAL_OP                          0xFFFFFF8F
+#define     V_028808_SPECIAL_NORMAL                     0x00
+#define     V_028808_SPECIAL_DISABLE                    0x01
+#define     V_028808_SPECIAL_RESOLVE_BOX                0x07
+
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define CB_COLOR0_SIZE                                  0x28060
+#define CB_COLOR0_VIEW                                  0x28080
+#define R_028080_CB_COLOR0_VIEW                      0x028080
+#define   S_028080_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028080_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028080_SLICE_START                         0xFFFFF800
+#define   S_028080_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028080_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028080_SLICE_MAX                           0xFF001FFF
+#define R_028084_CB_COLOR1_VIEW                      0x028084
+#define R_028088_CB_COLOR2_VIEW                      0x028088
+#define R_02808C_CB_COLOR3_VIEW                      0x02808C
+#define R_028090_CB_COLOR4_VIEW                      0x028090
+#define R_028094_CB_COLOR5_VIEW                      0x028094
+#define R_028098_CB_COLOR6_VIEW                      0x028098
+#define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                  (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                      0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
+#define CB_COLOR0_INFO                                  0x280a0
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#	define CB_SOURCE_FORMAT(x)			((x) << 27)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
+#define CB_COLOR0_TILE                                  0x280c0
+#define CB_COLOR0_FRAG                                  0x280e0
+#define CB_COLOR0_MASK                                  0x28100
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+
+#define	CONFIG_MEMSIZE					0x5428
+#define CONFIG_CNTL					0x5424
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define	CP_COHER_BASE					0x85F8
+#define	CP_DEBUG					0xC1FC
+#define	R_0086D8_CP_ME_CNTL			0x86D8
+#define		S_0086D8_CP_PFP_HALT(x)			(((x) & 1)<<26)
+#define		C_0086D8_CP_PFP_HALT(x)			((x) & 0xFBFFFFFF)
+#define		S_0086D8_CP_ME_HALT(x)			(((x) & 1)<<28)
+#define		C_0086D8_CP_ME_HALT(x)			((x) & 0xEFFFFFFF)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ_END(x)					((x) << 16)
+#define		ROQ_END(x)					((x) << 24)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_ROQ_IB1_STAT					0x8784
+#define	CP_ROQ_IB2_STAT					0x8788
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG					0x9830
+#define		PREZ_MUST_WAIT_FOR_POSTZ_DONE			(1 << 31)
+#define	DB_DEPTH_BASE					0x2800C
+#define	DB_HTILE_DATA_BASE				0x28014
+#define	DB_HTILE_SURFACE				0x28D24
+#define   S_028D24_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028D24_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028D24_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028D24_HTILE_HEIGHT(x)                      (((x) & 0x1) << 1)
+#define   G_028D24_HTILE_HEIGHT(x)                      (((x) >> 1) & 0x1)
+#define   C_028D24_HTILE_HEIGHT                         0xFFFFFFFD
+#define   G_028D24_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define	DB_WATERMARKS					0x9838
+#define		DEPTH_FREE(x)					((x) << 0)
+#define		DEPTH_FLUSH(x)					((x) << 5)
+#define		DEPTH_PENDING_FREE(x)				((x) << 15)
+#define		DEPTH_CACHELINE_FREE(x)				((x) << 20)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define 	BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define SQ_CONFIG                                         0x8c00
+#       define VC_ENABLE                                  (1 << 0)
+#       define EXPORT_SRC_C                               (1 << 1)
+#       define DX9_CONSTS                                 (1 << 2)
+#       define ALU_INST_PREFER_VECTOR                     (1 << 3)
+#       define DX10_CLAMP                                 (1 << 4)
+#       define CLAUSE_SEQ_PRIO(x)                         ((x) << 8)
+#       define PS_PRIO(x)                                 ((x) << 24)
+#       define VS_PRIO(x)                                 ((x) << 26)
+#       define GS_PRIO(x)                                 ((x) << 28)
+#       define ES_PRIO(x)                                 ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1                            0x8c04
+#       define NUM_PS_GPRS(x)                             ((x) << 0)
+#       define NUM_VS_GPRS(x)                             ((x) << 16)
+#       define NUM_CLAUSE_TEMP_GPRS(x)                    ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2                            0x8c08
+#       define NUM_GS_GPRS(x)                             ((x) << 0)
+#       define NUM_ES_GPRS(x)                             ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT                           0x8c0c
+#       define NUM_PS_THREADS(x)                          ((x) << 0)
+#       define NUM_VS_THREADS(x)                          ((x) << 8)
+#       define NUM_GS_THREADS(x)                          ((x) << 16)
+#       define NUM_ES_THREADS(x)                          ((x) << 24)
+#define SQ_STACK_RESOURCE_MGMT_1                          0x8c10
+#       define NUM_PS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_VS_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2                          0x8c14
+#       define NUM_GS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_ES_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_ESGS_RING_BASE                               0x8c40
+#define SQ_GSVS_RING_BASE                               0x8c48
+#define SQ_ESTMP_RING_BASE                              0x8c50
+#define SQ_GSTMP_RING_BASE                              0x8c58
+#define SQ_VSTMP_RING_BASE                              0x8c60
+#define SQ_PSTMP_RING_BASE                              0x8c68
+#define SQ_FBUF_RING_BASE                               0x8c70
+#define SQ_REDUC_RING_BASE                              0x8c78
+
+#define GRBM_CNTL                                       0x8000
+#       define GRBM_READ_TIMEOUT(x)                     ((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000001F
+#define		GUI_ACTIVE					(1<<31)
+#define	GRBM_STATUS2					0x8014
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+
+#define	CG_THERMAL_CTRL					0x7F0
+#define		DIG_THERM_DPM(x)			((x) << 12)
+#define		DIG_THERM_DPM_MASK			0x000FF000
+#define		DIG_THERM_DPM_SHIFT			12
+#define	CG_THERMAL_STATUS				0x7F4
+#define		ASIC_T(x)			        ((x) << 0)
+#define		ASIC_T_MASK			        0x1FF
+#define		ASIC_T_SHIFT			        0
+#define	CG_THERMAL_INT					0x7F8
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
+
+#define	RV770_CG_THERMAL_INT				0x734
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_CONFIG					0x2000
+#define MC_VM_AGP_TOP					0x2184
+#define MC_VM_AGP_BOT					0x2188
+#define	MC_VM_AGP_BASE					0x218C
+#define MC_VM_FB_LOCATION				0x2180
+#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL			0x2124
+#define 	ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L1_STRICT_ORDERING			(1 << 2)
+#define		SYSTEM_ACCESS_MODE_MASK				0x000000C0
+#define		SYSTEM_ACCESS_MODE_SHIFT			6
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 6)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 6)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 6)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 6)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 8)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE	(1 << 8)
+#define		ENABLE_SEMAPHORE_MODE				(1 << 10)
+#define		ENABLE_WAIT_L2_QUERY				(1 << 11)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			(((x) & 7) << 12)
+#define		EFFECTIVE_L1_TLB_SIZE_MASK			0x00007000
+#define		EFFECTIVE_L1_TLB_SIZE_SHIFT			12
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		EFFECTIVE_L1_QUEUE_SIZE_MASK			0x00038000
+#define		EFFECTIVE_L1_QUEUE_SIZE_SHIFT			15
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL			0x219C
+#define MC_VM_L1_TLB_MCD_RD_B_CNTL			0x21A0
+#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL			0x21FC
+#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL			0x2204
+#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL			0x2208
+#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL			0x220C
+#define	MC_VM_L1_TLB_MCB_RD_SYS_CNTL			0x2200
+#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL			0x212c
+#define MC_VM_L1_TLB_MCD_WR_A_CNTL			0x21A4
+#define MC_VM_L1_TLB_MCD_WR_B_CNTL			0x21A8
+#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL			0x2210
+#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL			0x2218
+#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL			0x221C
+#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL			0x2220
+#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL			0x2214
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2190
+#define		LOGICAL_PAGE_NUMBER_MASK			0x000FFFFF
+#define		LOGICAL_PAGE_NUMBER_SHIFT			0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2194
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x2198
+
+#define RS_DQ_RD_RET_CONF				0x2348
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define	PA_SC_AA_SAMPLE_LOCS_2S				0x8B40
+#define	PA_SC_AA_SAMPLE_LOCS_4S				0x8B44
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD0			0x8B48
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD1			0x8B4C
+#define		S0_X(x)						((x) << 0)
+#define		S0_Y(x)						((x) << 4)
+#define		S1_X(x)						((x) << 8)
+#define		S1_Y(x)						((x) << 12)
+#define		S2_X(x)						((x) << 16)
+#define		S2_Y(x)						((x) << 20)
+#define		S3_X(x)						((x) << 24)
+#define		S3_Y(x)						((x) << 28)
+#define		S4_X(x)						((x) << 0)
+#define		S4_Y(x)						((x) << 4)
+#define		S5_X(x)						((x) << 8)
+#define		S5_Y(x)						((x) << 12)
+#define		S6_X(x)						((x) << 16)
+#define		S6_Y(x)						((x) << 20)
+#define		S7_X(x)						((x) << 24)
+#define		S7_Y(x)						((x) << 28)
+#define PA_SC_CLIPRECT_RULE				0x2820c
+#define	PA_SC_ENHANCE					0x8BF0
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_TILE_CNT(x)			((x) << 12)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define	PCIE_PORT_INDEX					0x0038
+#define	PCIE_PORT_DATA					0x003C
+
+#define CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+
+#define RAMCFG						0x2408
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000001
+#define		NOOFRANK_SHIFT					1
+#define		NOOFRANK_MASK					0x00000002
+#define		NOOFROWS_SHIFT					2
+#define		NOOFROWS_MASK					0x0000001C
+#define		NOOFCOLS_SHIFT					5
+#define		NOOFCOLS_MASK					0x00000060
+#define		CHANSIZE_SHIFT					7
+#define		CHANSIZE_MASK					0x00000080
+#define		BURSTLENGTH_SHIFT				8
+#define		BURSTLENGTH_MASK				0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 10)
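+
+/* Decode sketch (illustrative; mirrors what MC init code does with these
+ * masks):
+ *
+ *   u32 tmp = RREG32(RAMCFG);
+ *   u32 chansize;
+ *   if (tmp & CHANSIZE_OVERRIDE)
+ *           chansize = 16;
+ *   else if (tmp & CHANSIZE_MASK)
+ *           chansize = 64;
+ *   else
+ *           chansize = 32;
+ *   u32 numchan = 1 << ((RREG32(CHMAP) & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT);
+ */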
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+#define	SPI_PS_IN_CONTROL_1				0x286D0
+#define		GEN_INDEX_PIX					(1<<0)
+#define		GEN_INDEX_PIX_ADDR(x)				((x)<<1)
+#define		FRONT_FACE_ENA					(1<<8)
+#define		FRONT_FACE_CHAN(x)				((x)<<9)
+#define		FRONT_FACE_ALL_BITS				(1<<11)
+#define		FRONT_FACE_ADDR(x)				((x)<<12)
+#define		FOG_ADDR(x)					((x)<<17)
+#define		FIXED_PT_POSITION_ENA				(1<<24)
+#define		FIXED_PT_POSITION_ADDR(x)			((x)<<25)
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_PGM_START_ES					0x28880
+#define	SQ_PGM_START_FS					0x28894
+#define	SQ_PGM_START_GS					0x2886C
+#define	SQ_PGM_START_PS					0x28840
+#define SQ_PGM_RESOURCES_PS                             0x28850
+#define SQ_PGM_EXPORTS_PS                               0x28854
+#define SQ_PGM_CF_OFFSET_PS                             0x288cc
+#define	SQ_PGM_START_VS					0x28858
+#define SQ_PGM_RESOURCES_VS                             0x28868
+#define SQ_PGM_CF_OFFSET_VS                             0x288d0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000c
+#define	SQ_VTX_CONSTANT_WORD6_0				0x38018
+#define		S__SQ_VTX_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_VTX_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
+
+
+#define	SX_MISC						0x28350
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_DEBUG_1					0x9054
+#define		SMX_EVENT_RELEASE				(1 << 0)
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1 << 31)
+
+#define	TC_CNTL						0x9608
+#define		TC_L2_SIZE(x)					((x)<<5)
+#define		L2_DISABLE_LATE_HIT				(1<<9)
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define	VGT_DMA_BASE					0x287E8
+#define	VGT_DMA_BASE_HI					0x287E4
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_BASE_OFFSET_0			0x28B10
+#define	VGT_STRMOUT_BASE_OFFSET_1			0x28B14
+#define	VGT_STRMOUT_BASE_OFFSET_2			0x28B18
+#define	VGT_STRMOUT_BASE_OFFSET_3			0x28B1c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_0			0x28B44
+#define	VGT_STRMOUT_BASE_OFFSET_HI_1			0x28B48
+#define	VGT_STRMOUT_BASE_OFFSET_HI_2			0x28B4c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_3			0x28B50
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define	VGT_STRMOUT_BUFFER_OFFSET_0			0x28ADC
+#define	VGT_STRMOUT_BUFFER_OFFSET_1			0x28AEC
+#define	VGT_STRMOUT_BUFFER_OFFSET_2			0x28AFC
+#define	VGT_STRMOUT_BUFFER_OFFSET_3			0x28B0C
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT0_INVALIDATION_LOW_ADDR		0x1490
+#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR		0x14B0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x1574
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x1594
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x15B4
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1554
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 13)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT_0(x)				(((x) & 0x1f) << 0)
+#define		BANK_SELECT_1(x)				(((x) & 0x1f) << 5)
+#define		L2_CACHE_UPDATE_MODE(x)				(((x) & 3) << 10)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
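+
+/* Flush sketch (illustrative): a context-0 TLB invalidate uses the
+ * request/response handshake above; real code bounds the poll with a
+ * timeout:
+ *
+ *   WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, gtt_start >> 12);
+ *   WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (gtt_end - 1) >> 12);
+ *   WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ *   while (!((RREG32(VM_CONTEXT0_REQUEST_RESPONSE) &
+ *             RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT))
+ *           udelay(1);
+ */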
+
+#define	WAIT_UNTIL					0x8040
+#define         WAIT_CP_DMA_IDLE_bit                            (1 << 8)
+#define         WAIT_2D_IDLE_bit                                (1 << 14)
+#define         WAIT_3D_IDLE_bit                                (1 << 15)
+#define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
+#define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
+
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
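+
+/* Ring bring-up sketch (illustrative): DMA_RB_SIZE takes log2 of the ring
+ * size in dwords, and the base address is 256-byte aligned (hence >> 8):
+ *
+ *   u32 rb_bufsz = order_base_2(ring_size_bytes / 4);
+ *   WREG32(DMA_RB_BASE, ring_gpu_addr >> 8);
+ *   WREG32(DMA_RB_CNTL, DMA_RB_ENABLE | DMA_RB_SIZE(rb_bufsz) |
+ *          DMA_RPTR_WRITEBACK_ENABLE);
+ */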
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd /* 7xx only */
+#define	DMA_PACKET_NOP					  0xf
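+
+/* Emit sketch (illustrative, close to the driver's DMA fence emit): a packet
+ * begins with a header built by DMA_PACKET(); for a FENCE the t/s bits are 0
+ * and the payload is a 40-bit address plus the fence value:
+ *
+ *   radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ *   radeon_ring_write(ring, lower_32_bits(addr) & 0xfffffffc);
+ *   radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ *   radeon_ring_write(ring, seq);
+ */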
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
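+
+/* IH bring-up sketch (illustrative): as with the DMA ring, IH_RB_SIZE is a
+ * log2 dword count; write-back of the write pointer is optional:
+ *
+ *   WREG32(IH_RB_BASE, ih_gpu_addr >> 8);
+ *   WREG32(IH_RB_CNTL, IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR |
+ *          IH_WPTR_WRITEBACK_ENABLE | IH_RB_SIZE(rb_bufsz));
+ */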
+
+#define RLC_CNTL                                          0x3f00
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB				  0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB				  0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT			  0x3f40
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+#define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_BIF                             (1 << 1)
+#       define SOFT_RESET_DMA                             (1 << 12)
+#       define SOFT_RESET_RLC                             (1 << 13)
+#       define SOFT_RESET_UVD                             (1 << 18)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
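+
+/* Reset sketch (illustrative): SRBM soft resets are pulsed rather than
+ * latched; assert, post, wait, then deassert:
+ *
+ *   WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ *   RREG32(SRBM_SOFT_RESET);    // posting read
+ *   udelay(50);
+ *   WREG32(SRBM_SOFT_RESET, 0);
+ */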
+
+#define BIF_SCRATCH0                                      0x5438
+
+#define BUS_CNTL                                          0x5420
+#       define BIOS_ROM_DIS                               (1 << 1)
+#       define VGA_COHE_SPEC_TIMER_DIS                    (1 << 9)
+
+#define CP_INT_CNTL                                       0xc124
+#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
+#       define SCRATCH_INT_ENABLE                         (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
+#       define IB2_INT_ENABLE                             (1 << 29)
+#       define IB1_INT_ENABLE                             (1 << 30)
+#       define RB_INT_ENABLE                              (1 << 31)
+#define CP_INT_STATUS                                     0xc128
+#       define SCRATCH_INT_STAT                           (1 << 25)
+#       define TIME_STAMP_INT_STAT                        (1 << 26)
+#       define IB2_INT_STAT                               (1 << 29)
+#       define IB1_INT_STAT                               (1 << 30)
+#       define RB_INT_STAT                                (1 << 31)
+
+#define GRBM_INT_CNTL                                     0x8060
+#       define RDERR_INT_ENABLE                           (1 << 0)
+#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
+#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define D1MODE_VBLANK_STATUS                              0x6534
+#define D2MODE_VBLANK_STATUS                              0x6d34
+#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
+#       define DxMODE_VBLANK_ACK                          (1 << 4)
+#       define DxMODE_VBLANK_STAT                         (1 << 12)
+#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
+#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
+#define D1MODE_VLINE_STATUS                               0x653c
+#define D2MODE_VLINE_STATUS                               0x6d3c
+#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
+#       define DxMODE_VLINE_ACK                           (1 << 4)
+#       define DxMODE_VLINE_STAT                          (1 << 12)
+#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
+#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
+#define DxMODE_INT_MASK                                   0x6540
+#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
+#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
+#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
+#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
+#       define DC_HPD1_INTERRUPT                          (1 << 18)
+#       define DC_HPD2_INTERRUPT                          (1 << 19)
+#define DISP_INTERRUPT_STATUS                             0x7edc
+#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
+#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
+#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
+#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
+#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
+#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
+#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
+#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
+#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
+#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
+#       define DC_HPD4_INTERRUPT                          (1 << 14)
+#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
+#       define DC_HPD3_INTERRUPT                          (1 << 28)
+#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
+#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
+#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
+#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
+#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
+#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
+#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
+#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
+#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
+#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
+#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
+#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
+#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
+#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
+#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
+#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
+#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
+/* DCE 3.2 */
+#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
+#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
+#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
+#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
+#       define DC_HPD5_INTERRUPT                          (1 << 19)
+#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
+#       define DC_HPD6_INTERRUPT                          (1 << 21)
+#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL                          0x7828
+#define DACB_AUTO_DETECT_CONTROL                          0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
+#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
+#       define DACx_AUTODETECT_MODE_NONE                  0
+#       define DACx_AUTODETECT_MODE_CONNECT               1
+#       define DACx_AUTODETECT_MODE_DISCONNECT            2
+#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
+#define DACA_AUTODETECT_INT_CONTROL                       0x7838
+#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
+#       define DACx_AUTODETECT_ACK                        (1 << 0)
+#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
+#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
+#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS                                0x7d00
+#define DC_HPD2_INT_STATUS                                0x7d0c
+#define DC_HPD3_INT_STATUS                                0x7d18
+#define DC_HPD4_INT_STATUS                                0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS                                0x7dc0
+#define DC_HPD6_INT_STATUS                                0x7df4
+#       define DC_HPDx_INT_STATUS                         (1 << 0)
+#       define DC_HPDx_SENSE                              (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
+#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
+#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL                               0x7d04
+#define DC_HPD2_INT_CONTROL                               0x7d10
+#define DC_HPD3_INT_CONTROL                               0x7d1c
+#define DC_HPD4_INT_CONTROL                               0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL                               0x7dc4
+#define DC_HPD6_INT_CONTROL                               0x7df8
+#       define DC_HPDx_INT_ACK                            (1 << 0)
+#       define DC_HPDx_INT_POLARITY                       (1 << 8)
+#       define DC_HPDx_INT_EN                             (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
+#       define DC_HPDx_RX_INT_EN                          (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL                                   0x7d08
+#define DC_HPD2_CONTROL                                   0x7d14
+#define DC_HPD3_CONTROL                                   0x7d20
+#define DC_HPD4_CONTROL                                   0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL                                   0x7dc8
+#define DC_HPD6_CONTROL                                   0x7dfc
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+/* DCE 3.2 */
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
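+
+/* Ack sketch (illustrative): page-flip completion is acknowledged by writing
+ * the CLEAR bit back into the status register:
+ *
+ *   if (RREG32(D1GRPH_INTERRUPT_STATUS) & DxGRPH_PFLIP_INT_OCCURRED)
+ *           WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ */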
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#       define LC_POINT_7_PLUS_EN                         (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
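+
+/* Read-back sketch (illustrative): the negotiated width comes from the RD
+ * field, via the driver's indirect PCIE-port accessor; note the result is an
+ * LC_LINK_WIDTH_X* code (X16 is 6), not a raw lane count:
+ *
+ *   u32 lw = (RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) &
+ *             LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ */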
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     8
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/* Audio */
+#define AZ_HOT_PLUG_CONTROL               0x7300
+#       define AZ_FORCE_CODEC_WAKE        (1 << 0)
+#       define JACK_DETECTION_ENABLE      (1 << 4)
+#       define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+#       define CODEC_HOT_PLUG_ENABLE      (1 << 12)
+#       define AUDIO_ENABLED              (1 << 31)
+/* DCE3 adds */
+#       define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+#       define PIN0_AUDIO_ENABLED         (1 << 24)
+#       define PIN1_AUDIO_ENABLED         (1 << 25)
+#       define PIN2_AUDIO_ENABLED         (1 << 26)
+#       define PIN3_AUDIO_ENABLED         (1 << 27)
+
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO                         0x7340
+#       define AUDIO_DTO_PHASE(x)         (((x) & 0xffff) << 0)
+#       define AUDIO_DTO_MODULE(x)        (((x) & 0xffff) << 16)
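+
+/* Programming sketch (illustrative; exact scaling hedged from older driver
+ * code): phase/module form the ratio that rederives a 24 MHz-class audio
+ * reference from the pixel clock, both fields being 16 bits wide:
+ *
+ *   WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+ *          AUDIO_DTO_MODULE(clock / 10));
+ */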
+
+/* Audio clocks DCE 3.2 */
+#define DCCG_AUDIO_DTO0_PHASE             0x0514
+#define DCCG_AUDIO_DTO0_MODULE            0x0518
+#define DCCG_AUDIO_DTO0_LOAD              0x051c
+#       define DTO_LOAD                   (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL              0x0520
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
+
+#define DCCG_AUDIO_DTO1_PHASE             0x0524
+#define DCCG_AUDIO_DTO1_MODULE            0x0528
+#define DCCG_AUDIO_DTO1_LOAD              0x052c
+#define DCCG_AUDIO_DTO1_CNTL              0x0530
+
+#define DCCG_AUDIO_DTO_SELECT             0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL                       0x7880
+#       define TMDSA_HDMI_EN             (1 << 2)
+#define LVTMA_CNTL                       0x7a80
+#       define LVTMA_HDMI_EN             (1 << 2)
+#define DDIA_CNTL                        0x7200
+#       define DDIA_HDMI_EN              (1 << 2)
+#define DIG0_CNTL                        0x75a0
+#       define DIG_MODE(x)               (((x) & 7) << 8)
+#       define DIG_MODE_DP               0
+#       define DIG_MODE_LVDS             1
+#       define DIG_MODE_TMDS_DVI         2
+#       define DIG_MODE_TMDS_HDMI        3
+#       define DIG_MODE_SDVO             4
+#define DIG1_CNTL                        0x79a0
+
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER          0x71bc
+#define		SPEAKER_ALLOCATION(x)			(((x) & 0x7f) << 0)
+#define		SPEAKER_ALLOCATION_MASK			(0x7f << 0)
+#define		SPEAKER_ALLOCATION_SHIFT		0
+#define		HDMI_CONNECTION				(1 << 16)
+#define		DP_CONNECTION				(1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
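+
+/* Fill-in sketch (illustrative): an EDID short audio descriptor (the
+ * kernel's struct cea_sad) maps onto these fields almost directly; its
+ * channel field is already "channels minus one", matching MAX_CHANNELS:
+ *
+ *   WREG32(AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0,
+ *          MAX_CHANNELS(sad->channels) |
+ *          SUPPORTED_FREQUENCIES(sad->freq) |
+ *          SUPPORTED_FREQUENCIES_STEREO(sad->freq));
+ */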
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has only
+ * one instance of the blocks while r6xx has 2.  DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but also have 2 instances.
+ * DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL                0x7400
+/* rs6xx/rs740/r6xx */
+#       define HDMI0_ENABLE          (1 << 0)
+#       define HDMI0_STREAM(x)       (((x) & 3) << 2)
+#       define HDMI0_STREAM_TMDSA    0
+#       define HDMI0_STREAM_LVTMA    1
+#       define HDMI0_STREAM_DVOA     2
+#       define HDMI0_STREAM_DDIA     3
+/* rs6xx/r6xx/dce3 */
+#       define HDMI0_ERROR_ACK       (1 << 8)
+#       define HDMI0_ERROR_MASK      (1 << 9)
+#define HDMI0_STATUS                 0x7404
+#       define HDMI0_ACTIVE_AVMUTE   (1 << 0)
+#       define HDMI0_AUDIO_ENABLE    (1 << 4)
+#       define HDMI0_AZ_FORMAT_WTRIG     (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL   0x7408
+#       define HDMI0_AUDIO_SAMPLE_SEND  (1 << 0)
+#       define HDMI0_AUDIO_DELAY_EN(x)  (((x) & 3) << 4)
+#       define HDMI0_AUDIO_DELAY_EN_MASK	(3 << 4)
+#       define HDMI0_AUDIO_SEND_MAX_PACKETS  (1 << 8)
+#       define HDMI0_AUDIO_TEST_EN         (1 << 12)
+#       define HDMI0_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#       define HDMI0_AUDIO_PACKETS_PER_LINE_MASK	(0x1f << 16)
+#       define HDMI0_AUDIO_CHANNEL_SWAP    (1 << 24)
+#       define HDMI0_60958_CS_UPDATE       (1 << 26)
+#       define HDMI0_AZ_FORMAT_WTRIG_MASK  (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_ACK   (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL      0x740c
+#       define HDMI0_AUDIO_CRC_EN    (1 << 0)
+#define DCE3_HDMI0_ACR_PACKET_CONTROL	0x740c
+#define HDMI0_VBI_PACKET_CONTROL     0x7410
+#       define HDMI0_NULL_SEND       (1 << 0)
+#       define HDMI0_GC_SEND         (1 << 4)
+#       define HDMI0_GC_CONT         (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0     0x7414
+#       define HDMI0_AVI_INFO_SEND   (1 << 0)
+#       define HDMI0_AVI_INFO_CONT   (1 << 1)
+#       define HDMI0_AUDIO_INFO_SEND (1 << 4)
+#       define HDMI0_AUDIO_INFO_CONT (1 << 5)
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+#       define HDMI0_MPEG_INFO_SEND  (1 << 8)
+#       define HDMI0_MPEG_INFO_CONT  (1 << 9)
+#       define HDMI0_MPEG_INFO_UPDATE  (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1     0x7418
+#       define HDMI0_AVI_INFO_LINE(x)  (((x) & 0x3f) << 0)
+#       define HDMI0_AVI_INFO_LINE_MASK		(0x3f << 0)
+#       define HDMI0_AUDIO_INFO_LINE(x)  (((x) & 0x3f) << 8)
+#       define HDMI0_AUDIO_INFO_LINE_MASK	(0x3f << 8)
+#       define HDMI0_MPEG_INFO_LINE(x)  (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+#       define HDMI0_GENERIC0_SEND   (1 << 0)
+#       define HDMI0_GENERIC0_CONT   (1 << 1)
+#       define HDMI0_GENERIC0_UPDATE (1 << 2)
+#       define HDMI0_GENERIC1_SEND   (1 << 4)
+#       define HDMI0_GENERIC1_CONT   (1 << 5)
+#       define HDMI0_GENERIC0_LINE(x)  (((x) & 0x3f) << 16)
+#       define HDMI0_GENERIC0_LINE_MASK		(0x3f << 16)
+#       define HDMI0_GENERIC1_LINE(x)  (((x) & 0x3f) << 24)
+#       define HDMI0_GENERIC1_LINE_MASK		(0x3f << 24)
+#define HDMI0_GC                     0x7428
+#       define HDMI0_GC_AVMUTE       (1 << 0)
+#define HDMI0_AVI_INFO0              0x7454
+#       define HDMI0_AVI_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AVI_INFO_S(x)   (((x) & 3) << 8)
+#       define HDMI0_AVI_INFO_B(x)   (((x) & 3) << 10)
+#       define HDMI0_AVI_INFO_A(x)   (((x) & 1) << 12)
+#       define HDMI0_AVI_INFO_Y(x)   (((x) & 3) << 13)
+#       define HDMI0_AVI_INFO_Y_RGB       0
+#       define HDMI0_AVI_INFO_Y_YCBCR422  1
+#       define HDMI0_AVI_INFO_Y_YCBCR444  2
+#       define HDMI0_AVI_INFO_Y_A_B_S(x)   (((x) & 0xff) << 8)
+#       define HDMI0_AVI_INFO_R(x)   (((x) & 0xf) << 16)
+#       define HDMI0_AVI_INFO_M(x)   (((x) & 0x3) << 20)
+#       define HDMI0_AVI_INFO_C(x)   (((x) & 0x3) << 22)
+#       define HDMI0_AVI_INFO_C_M_R(x)   (((x) & 0xff) << 16)
+#       define HDMI0_AVI_INFO_SC(x)  (((x) & 0x3) << 24)
+#       define HDMI0_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1              0x7458
+#       define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define HDMI0_AVI_INFO_PR(x)  (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2              0x745c
+#       define HDMI0_AVI_INFO_BOTTOM(x)  (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_LEFT(x)    (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3              0x7460
+#       define HDMI0_AVI_INFO_RIGHT(x)    (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_VERSION(x)  (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0             0x7464
+#       define HDMI0_MPEG_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MB0(x)  (((x) & 0xff) << 8)
+#       define HDMI0_MPEG_INFO_MB1(x)  (((x) & 0xff) << 16)
+#       define HDMI0_MPEG_INFO_MB2(x)  (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1             0x7468
+#       define HDMI0_MPEG_INFO_MB3(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MF(x)   (((x) & 3) << 8)
+#       define HDMI0_MPEG_INFO_FR(x)   (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR           0x746c
+#define HDMI0_GENERIC0_0             0x7470
+#define HDMI0_GENERIC0_1             0x7474
+#define HDMI0_GENERIC0_2             0x7478
+#define HDMI0_GENERIC0_3             0x747c
+#define HDMI0_GENERIC0_4             0x7480
+#define HDMI0_GENERIC0_5             0x7484
+#define HDMI0_GENERIC0_6             0x7488
+#define HDMI0_GENERIC1_HDR           0x748c
+#define HDMI0_GENERIC1_0             0x7490
+#define HDMI0_GENERIC1_1             0x7494
+#define HDMI0_GENERIC1_2             0x7498
+#define HDMI0_GENERIC1_3             0x749c
+#define HDMI0_GENERIC1_4             0x74a0
+#define HDMI0_GENERIC1_5             0x74a4
+#define HDMI0_GENERIC1_6             0x74a8
+#define HDMI0_ACR_32_0               0x74ac
+#       define HDMI0_ACR_CTS_32(x)   (((x) & 0xfffff) << 12)
+#       define HDMI0_ACR_CTS_32_MASK		(0xfffff << 12)
+#define HDMI0_ACR_32_1               0x74b0
+#       define HDMI0_ACR_N_32(x)   (((x) & 0xfffff) << 0)
+#       define HDMI0_ACR_N_32_MASK		(0xfffff << 0)
+#define HDMI0_ACR_44_0               0x74b4
+#       define HDMI0_ACR_CTS_44(x)   (((x) & 0xfffff) << 12)
+#       define HDMI0_ACR_CTS_44_MASK		(0xfffff << 12)
+#define HDMI0_ACR_44_1               0x74b8
+#       define HDMI0_ACR_N_44(x)   (((x) & 0xfffff) << 0)
+#       define HDMI0_ACR_N_44_MASK		(0xfffff << 0)
+#define HDMI0_ACR_48_0               0x74bc
+#       define HDMI0_ACR_CTS_48(x)   (((x) & 0xfffff) << 12)
+#       define HDMI0_ACR_CTS_48_MASK		(0xfffff << 12)
+#define HDMI0_ACR_48_1               0x74c0
+#       define HDMI0_ACR_N_48(x)   (((x) & 0xfffff) << 0)
+#       define HDMI0_ACR_N_48_MASK		(0xfffff << 0)
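+
+/* Editor's note: the N/CTS pairs implement HDMI audio clock regeneration,
+ * f_audio = f_TMDS * N / (128 * CTS).  With HDMI0_ACR_SOURCE left on hw the
+ * CTS is measured by hardware and only N needs programming; the HDMI spec's
+ * canonical N values are 4096 (32 kHz), 6272 (44.1 kHz) and 6144 (48 kHz),
+ * e.g. (offset is the per-block offset, see below):
+ *
+ *   WREG32(HDMI0_ACR_48_1 + offset, HDMI0_ACR_N_48(6144));
+ */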
+#define HDMI0_ACR_STATUS_0           0x74c4
+#define HDMI0_ACR_STATUS_1           0x74c8
+#define HDMI0_AUDIO_INFO0            0x74cc
+#       define HDMI0_AUDIO_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_CC(x)  (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1            0x74d0
+#       define HDMI0_AUDIO_INFO_CA(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_LSV(x)  (((x) & 0xf) << 11)
+#       define HDMI0_AUDIO_INFO_DM_INH(x)  (((x) & 1) << 15)
+#       define HDMI0_AUDIO_INFO_DM_INH_LSV(x)  (((x) & 0xff) << 8)
+#define HDMI0_60958_0                0x74d4
+#       define HDMI0_60958_CS_A(x)   (((x) & 1) << 0)
+#       define HDMI0_60958_CS_B(x)   (((x) & 1) << 1)
+#       define HDMI0_60958_CS_C(x)   (((x) & 1) << 2)
+#       define HDMI0_60958_CS_D(x)   (((x) & 3) << 3)
+#       define HDMI0_60958_CS_MODE(x)   (((x) & 3) << 6)
+#       define HDMI0_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define HDMI0_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK	(0xf << 20)
+#       define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define HDMI0_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#       define HDMI0_60958_CS_CLOCK_ACCURACY_MASK	(3 << 28)
+#define HDMI0_60958_1                0x74d8
+#       define HDMI0_60958_CS_WORD_LENGTH(x)        (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_VALID_L(x)   (((x) & 1) << 16)
+#       define HDMI0_60958_CS_VALID_R(x)   (((x) & 1) << 18)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK	(0xf << 20)
+#define HDMI0_ACR_PACKET_CONTROL     0x74dc
+#       define HDMI0_ACR_SEND        (1 << 0)
+#       define HDMI0_ACR_CONT        (1 << 1)
+#       define HDMI0_ACR_SELECT(x)   (((x) & 3) << 4)
+#       define HDMI0_ACR_HW          0
+#       define HDMI0_ACR_32          1
+#       define HDMI0_ACR_44          2
+#       define HDMI0_ACR_48          3
+#       define HDMI0_ACR_SOURCE      (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI0_ACR_AUTO_SEND   (1 << 12)
+#define DCE3_HDMI0_AUDIO_CRC_CONTROL	0x74dc
+#define HDMI0_RAMP_CONTROL0          0x74e0
+#       define HDMI0_RAMP_MAX_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1          0x74e4
+#       define HDMI0_RAMP_MIN_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2          0x74e8
+#       define HDMI0_RAMP_INC_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3          0x74ec
+#       define HDMI0_RAMP_DEC_COUNT(x)   (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2                0x74f0
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL                0x7700
+#define HDMI1_STATUS                 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL   0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL                0x7800
+#define DCE3_HDMI1_STATUS                 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL   0x7808
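+
+/* Access sketch (illustrative): rather than duplicating every define, the
+ * driver carries a per-block offset; from the bases above, HDMI1 on r6xx is
+ * HDMI0 + 0x300 and the DCE3 second instance is HDMI0 + 0x400:
+ *
+ *   u32 offset = dig->afmt->offset;    // 0x0, 0x300 or 0x400
+ *   WREG32(HDMI0_CONTROL + offset, val);
+ */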
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+
+/* DCE3 FMT blocks */
+#define FMT_CONTROL                          0x6700
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6710
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x672c
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
+/* Power management */
+#define CG_SPLL_FUNC_CNTL                                 0x600
+#       define SPLL_RESET                                (1 << 0)
+#       define SPLL_SLEEP                                (1 << 1)
+#       define SPLL_REF_DIV(x)                           ((x) << 2)
+#       define SPLL_REF_DIV_MASK                         (7 << 2)
+#       define SPLL_FB_DIV(x)                            ((x) << 5)
+#       define SPLL_FB_DIV_MASK                          (0xff << 5)
+#       define SPLL_PULSEEN                              (1 << 13)
+#       define SPLL_PULSENUM(x)                          ((x) << 14)
+#       define SPLL_PULSENUM_MASK                        (3 << 14)
+#       define SPLL_SW_HILEN(x)                          ((x) << 16)
+#       define SPLL_SW_HILEN_MASK                        (0xf << 16)
+#       define SPLL_SW_LOLEN(x)                          ((x) << 20)
+#       define SPLL_SW_LOLEN_MASK                        (0xf << 20)
+#       define SPLL_DIVEN                                (1 << 24)
+#       define SPLL_BYPASS_EN                            (1 << 25)
+#       define SPLL_CHG_STATUS                           (1 << 29)
+#       define SPLL_CTLREQ                               (1 << 30)
+#       define SPLL_CTLACK                               (1 << 31)
+
+#define GENERAL_PWRMGT                                    0x618
+#       define GLOBAL_PWRMGT_EN                           (1 << 0)
+#       define STATIC_PM_EN                               (1 << 1)
+#       define MOBILE_SU                                  (1 << 2)
+#       define THERMAL_PROTECTION_DIS                     (1 << 3)
+#       define THERMAL_PROTECTION_TYPE                    (1 << 4)
+#       define ENABLE_GEN2PCIE                            (1 << 5)
+#       define SW_GPIO_INDEX(x)                           ((x) << 6)
+#       define SW_GPIO_INDEX_MASK                         (3 << 6)
+#       define LOW_VOLT_D2_ACPI                           (1 << 8)
+#       define LOW_VOLT_D3_ACPI                           (1 << 9)
+#       define VOLT_PWRMGT_EN                             (1 << 10)
+#define CG_TPC                                            0x61c
+#       define TPCC(x)                                    ((x) << 0)
+#       define TPCC_MASK                                  (0x7fffff << 0)
+#       define TPU(x)                                     ((x) << 23)
+#       define TPU_MASK                                   (0x1f << 23)
+#define SCLK_PWRMGT_CNTL                                  0x620
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_TURNOFF                               (1 << 1)
+#       define SPLL_TURNOFF                               (1 << 2)
+#       define SU_SCLK_USE_BCLK                           (1 << 3)
+#       define DYNAMIC_GFX_ISLAND_PWR_DOWN                (1 << 4)
+#       define DYNAMIC_GFX_ISLAND_PWR_LP                  (1 << 5)
+#       define CLK_TURN_ON_STAGGER                        (1 << 6)
+#       define CLK_TURN_OFF_STAGGER                       (1 << 7)
+#       define FIR_FORCE_TREND_SEL                        (1 << 8)
+#       define FIR_TREND_MODE                             (1 << 9)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 10)
+#       define VDDC3D_TURNOFF_D1                          (1 << 11)
+#       define VDDC3D_TURNOFF_D2                          (1 << 12)
+#       define VDDC3D_TURNOFF_D3                          (1 << 13)
+#       define SPLL_TURNOFF_D2                            (1 << 14)
+#       define SCLK_LOW_D1                                (1 << 15)
+#       define DYN_GFX_CLK_OFF_MC_EN                      (1 << 16)
+#define MCLK_PWRMGT_CNTL                                  0x624
+#       define MPLL_PWRMGT_OFF                            (1 << 0)
+#       define YCLK_TURNOFF                               (1 << 1)
+#       define MPLL_TURNOFF                               (1 << 2)
+#       define SU_MCLK_USE_BCLK                           (1 << 3)
+#       define DLL_READY                                  (1 << 4)
+#       define MC_BUSY                                    (1 << 5)
+#       define MC_INT_CNTL                                (1 << 7)
+#       define MRDCKA_SLEEP                               (1 << 8)
+#       define MRDCKB_SLEEP                               (1 << 9)
+#       define MRDCKC_SLEEP                               (1 << 10)
+#       define MRDCKD_SLEEP                               (1 << 11)
+#       define MRDCKE_SLEEP                               (1 << 12)
+#       define MRDCKF_SLEEP                               (1 << 13)
+#       define MRDCKG_SLEEP                               (1 << 14)
+#       define MRDCKH_SLEEP                               (1 << 15)
+#       define MRDCKA_RESET                               (1 << 16)
+#       define MRDCKB_RESET                               (1 << 17)
+#       define MRDCKC_RESET                               (1 << 18)
+#       define MRDCKD_RESET                               (1 << 19)
+#       define MRDCKE_RESET                               (1 << 20)
+#       define MRDCKF_RESET                               (1 << 21)
+#       define MRDCKG_RESET                               (1 << 22)
+#       define MRDCKH_RESET                               (1 << 23)
+#       define DLL_READY_READ                             (1 << 24)
+#       define USE_DISPLAY_GAP                            (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                  (1 << 26)
+#       define USE_DISPLAY_GAP_CTXSW                      (1 << 27)
+#       define MPLL_TURNOFF_D2                            (1 << 28)
+#       define USE_DISPLAY_URGENT_CTXSW                   (1 << 29)
+
+#define MPLL_TIME                                         0x634
+#       define MPLL_LOCK_TIME(x)                          ((x) << 0)
+#       define MPLL_LOCK_TIME_MASK                        (0xffff << 0)
+#       define MPLL_RESET_TIME(x)                         ((x) << 16)
+#       define MPLL_RESET_TIME_MASK                       (0xffff << 16)
+
+#define SCLK_FREQ_SETTING_STEP_0_PART1                    0x648
+#       define STEP_0_SPLL_POST_DIV(x)                    ((x) << 0)
+#       define STEP_0_SPLL_POST_DIV_MASK                  (0xff << 0)
+#       define STEP_0_SPLL_FB_DIV(x)                      ((x) << 8)
+#       define STEP_0_SPLL_FB_DIV_MASK                    (0xff << 8)
+#       define STEP_0_SPLL_REF_DIV(x)                     ((x) << 16)
+#       define STEP_0_SPLL_REF_DIV_MASK                   (7 << 16)
+#       define STEP_0_SPLL_STEP_TIME(x)                   ((x) << 19)
+#       define STEP_0_SPLL_STEP_TIME_MASK                 (0x1fff << 19)
+#define SCLK_FREQ_SETTING_STEP_0_PART2                    0x64c
+#       define STEP_0_PULSE_HIGH_CNT(x)                   ((x) << 0)
+#       define STEP_0_PULSE_HIGH_CNT_MASK                 (0x1ff << 0)
+#       define STEP_0_POST_DIV_EN                         (1 << 9)
+#       define STEP_0_SPLL_STEP_ENABLE                    (1 << 30)
+#       define STEP_0_SPLL_ENTRY_VALID                    (1 << 31)
+
+#define VID_RT                                            0x6f8
+#       define VID_CRT(x)                                 ((x) << 0)
+#       define VID_CRT_MASK                               (0x1fff << 0)
+#       define VID_CRTU(x)                                ((x) << 13)
+#       define VID_CRTU_MASK                              (7 << 13)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (7 << 16)
+#define CTXSW_PROFILE_INDEX                               0x6fc
+#       define CTXSW_FREQ_VIDS_CFG_INDEX(x)               ((x) << 0)
+#       define CTXSW_FREQ_VIDS_CFG_INDEX_MASK             (3 << 0)
+#       define CTXSW_FREQ_VIDS_CFG_INDEX_SHIFT            0
+#       define CTXSW_FREQ_MCLK_CFG_INDEX(x)               ((x) << 2)
+#       define CTXSW_FREQ_MCLK_CFG_INDEX_MASK             (3 << 2)
+#       define CTXSW_FREQ_MCLK_CFG_INDEX_SHIFT            2
+#       define CTXSW_FREQ_SCLK_CFG_INDEX(x)               ((x) << 4)
+#       define CTXSW_FREQ_SCLK_CFG_INDEX_MASK             (0x1f << 4)
+#       define CTXSW_FREQ_SCLK_CFG_INDEX_SHIFT            4
+#       define CTXSW_FREQ_STATE_SPLL_RESET_EN             (1 << 9)
+#       define CTXSW_FREQ_STATE_ENABLE                    (1 << 10)
+#       define CTXSW_FREQ_DISPLAY_WATERMARK               (1 << 11)
+#       define CTXSW_FREQ_GEN2PCIE_VOLT                   (1 << 12)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x70c
+#       define TARGET_PROFILE_INDEX_MASK                  (3 << 0)
+#       define TARGET_PROFILE_INDEX_SHIFT                 0
+#       define CURRENT_PROFILE_INDEX_MASK                 (3 << 2)
+#       define CURRENT_PROFILE_INDEX_SHIFT                2
+#       define DYN_PWR_ENTER_INDEX(x)                     ((x) << 4)
+#       define DYN_PWR_ENTER_INDEX_MASK                   (3 << 4)
+#       define DYN_PWR_ENTER_INDEX_SHIFT                  4
+#       define CURR_MCLK_INDEX_MASK                       (3 << 6)
+#       define CURR_MCLK_INDEX_SHIFT                      6
+#       define CURR_SCLK_INDEX_MASK                       (0x1f << 8)
+#       define CURR_SCLK_INDEX_SHIFT                      8
+#       define CURR_VID_INDEX_MASK                        (3 << 13)
+#       define CURR_VID_INDEX_SHIFT                       13
+
+#define LOWER_GPIO_ENABLE                                 0x710
+#define UPPER_GPIO_ENABLE                                 0x714
+#define CTXSW_VID_LOWER_GPIO_CNTL                         0x718
+
+#define VID_UPPER_GPIO_CNTL                               0x740
+#define CG_CTX_CGTT3D_R                                   0x744
+#       define PHC(x)                                     ((x) << 0)
+#       define PHC_MASK                                   (0x1ff << 0)
+#       define SDC(x)                                     ((x) << 9)
+#       define SDC_MASK                                   (0x3fff << 9)
+#define CG_VDDC3D_OOR                                     0x748
+#       define SU(x)                                      ((x) << 23)
+#       define SU_MASK                                    (0xf << 23)
+#define CG_FTV                                            0x74c
+#define CG_FFCT_0                                         0x750
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                            0x78c
+#       define BSP(x)                                     ((x) << 0)
+#       define BSP_MASK                                   (0xffff << 0)
+#       define BSU(x)                                     ((x) << 16)
+#       define BSU_MASK                                   (0xf << 16)
+#define CG_RT                                             0x790
+#       define FLS(x)                                     ((x) << 0)
+#       define FLS_MASK                                   (0xffff << 0)
+#       define FMS(x)                                     ((x) << 16)
+#       define FMS_MASK                                   (0xffff << 16)
+#define CG_LT                                             0x794
+#       define FHS(x)                                     ((x) << 0)
+#       define FHS_MASK                                   (0xffff << 0)
+#define CG_GIT                                            0x798
+#       define CG_GICST(x)                                ((x) << 0)
+#       define CG_GICST_MASK                              (0xffff << 0)
+#       define CG_GIPOT(x)                                ((x) << 16)
+#       define CG_GIPOT_MASK                              (0xffff << 16)
+
+#define CG_SSP                                            0x7a8
+#       define CG_SST(x)                                  ((x) << 0)
+#       define CG_SST_MASK                                (0xffff << 0)
+#       define CG_SSTU(x)                                 ((x) << 16)
+#       define CG_SSTU_MASK                               (0xf << 16)
+
+#define CG_RLC_REQ_AND_RSP                                0x7c4
+#       define RLC_CG_REQ_TYPE_MASK                       0xf
+#       define RLC_CG_REQ_TYPE_SHIFT                      0
+#       define CG_RLC_RSP_TYPE_MASK                       0xf0
+#       define CG_RLC_RSP_TYPE_SHIFT                      4
+
+#define CG_FC_T                                           0x7cc
+#       define FC_T(x)                                    ((x) << 0)
+#       define FC_T_MASK                                  (0xffff << 0)
+#       define FC_TU(x)                                   ((x) << 16)
+#       define FC_TU_MASK                                 (0x1f << 16)
+
+#define GPIOPAD_MASK                                      0x1798
+#define GPIOPAD_A                                         0x179c
+#define GPIOPAD_EN                                        0x17a0
+
+#define GRBM_PWR_CNTL                                     0x800c
+#       define REQ_TYPE_MASK                              0xf
+#       define REQ_TYPE_SHIFT                             0
+#       define RSP_TYPE_MASK                              0xf0
+#       define RSP_TYPE_SHIFT                             4
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW				0xef00
+#define UVD_SEMA_ADDR_HIGH				0xef04
+#define UVD_SEMA_CMD					0xef08
+
+#define UVD_GPCOM_VCPU_CMD				0xef0c
+#define UVD_GPCOM_VCPU_DATA0				0xef10
+#define UVD_GPCOM_VCPU_DATA1				0xef14
+#define UVD_ENGINE_CNTL					0xef18
+#define UVD_NO_OP					0xeffc
+
+#define UVD_SEMA_CNTL					0xf400
+#define UVD_RB_ARB_CTRL					0xf480
+
+#define UVD_LMI_EXT40_ADDR				0xf498
+#define UVD_CGC_GATE					0xf4a8
+#define UVD_LMI_CTRL2					0xf4f4
+#define UVD_MASTINT_EN					0xf500
+#define UVD_FW_START					0xf51c
+#define UVD_LMI_ADDR_EXT				0xf594
+#define UVD_LMI_CTRL					0xf598
+#define UVD_LMI_SWAP_CNTL				0xf5b4
+#define UVD_MP_SWAP_CNTL				0xf5bc
+#define UVD_MPC_CNTL					0xf5dc
+#define UVD_MPC_SET_MUXA0				0xf5e4
+#define UVD_MPC_SET_MUXA1				0xf5e8
+#define UVD_MPC_SET_MUXB0				0xf5ec
+#define UVD_MPC_SET_MUXB1				0xf5f0
+#define UVD_MPC_SET_MUX					0xf5f4
+#define UVD_MPC_SET_ALU					0xf5f8
+
+#define UVD_VCPU_CACHE_OFFSET0				0xf608
+#define UVD_VCPU_CACHE_SIZE0				0xf60c
+#define UVD_VCPU_CACHE_OFFSET1				0xf610
+#define UVD_VCPU_CACHE_SIZE1				0xf614
+#define UVD_VCPU_CACHE_OFFSET2				0xf618
+#define UVD_VCPU_CACHE_SIZE2				0xf61c
+
+#define UVD_VCPU_CNTL					0xf660
+#define UVD_SOFT_RESET					0xf680
+#define		RBC_SOFT_RESET					(1 << 0)
+#define		LBSI_SOFT_RESET					(1 << 1)
+#define		LMI_SOFT_RESET					(1 << 2)
+#define		VCPU_SOFT_RESET					(1 << 3)
+#define		CSM_SOFT_RESET					(1 << 5)
+#define		CXW_SOFT_RESET					(1 << 6)
+#define		TAP_SOFT_RESET					(1 << 7)
+#define		LMI_UMC_SOFT_RESET				(1 << 13)
+#define UVD_RBC_IB_BASE					0xf684
+#define UVD_RBC_IB_SIZE					0xf688
+#define UVD_RBC_RB_BASE					0xf68c
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+#define UVD_RBC_RB_WPTR_CNTL				0xf698
+
+#define UVD_STATUS					0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS				0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL		0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL		0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL		0xf6cc
+
+#define UVD_RBC_RB_CNTL					0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR				0xf6a8
+
+#define UVD_CONTEXT_ID					0xf6f4
+
+/* rs780 only */
+#define	GFX_MACRO_BYPASS_CNTL				0x30c0
+#define		SPLL_BYPASS_CNTL			(1 << 0)
+#define		UPLL_BYPASS_CNTL			(1 << 1)
+
+#define CG_UPLL_FUNC_CNTL				0x7e0
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_FB_DIV(x)				((x) << 4)
+#	define UPLL_FB_DIV_MASK				0x0000FFF0
+#	define UPLL_REF_DIV(x)				((x) << 16)
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_REFCLK_SRC_SEL_MASK			0x20000000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define CG_UPLL_FUNC_CNTL_2				0x7e4
+#	define UPLL_SW_HILEN(x)				((x) << 0)
+#	define UPLL_SW_LOLEN(x)				((x) << 4)
+#	define UPLL_SW_HILEN2(x)			((x) << 8)
+#	define UPLL_SW_LOLEN2(x)			((x) << 12)
+#	define UPLL_DIVEN_MASK				0x00010000
+#	define UPLL_DIVEN2_MASK				0x00020000
+#	define UPLL_SW_MASK				0x0003FFFF
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
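+/*
+ * Usage sketch (illustrative only; radeon_ring_write() is the driver's
+ * ring helper).  For PACKET0, 'n' is the number of consecutive registers
+ * written minus one; for PACKET3, 'n' is the number of payload dwords
+ * minus one:
+ *
+ *	radeon_ring_write(ring, PACKET0(UVD_NO_OP, 0));
+ *	radeon_ring_write(ring, 0);
+ *	radeon_ring_write(ring, PACKET3(PACKET3_NOP, 0));
+ *	radeon_ring_write(ring, 0);
+ */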
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_START_3D_CMDBUF				0x24
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_IMMD_BE			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_INDIRECT_BUFFER_MP			0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#              define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
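+/*
+ * Illustrative emit following the 6-dword layout above (a sketch, not
+ * the driver's actual copy routine; radeon_ring_write() is the ring
+ * helper and 'count' is the transfer size in bytes, BYTE_COUNT [20:0]):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *	radeon_ring_write(ring, lower_32_bits(src));
+ *	radeon_ring_write(ring, PACKET3_CP_DMA_CP_SYNC |
+ *				(upper_32_bits(src) & 0xff));
+ *	radeon_ring_write(ring, lower_32_bits(dst));
+ *	radeon_ring_write(ring, upper_32_bits(dst) & 0xff);
+ *	radeon_ring_write(ring, count & 0x1fffff);
+ */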
+#define	PACKET3_PFP_SYNC_ME				0x42 /* r7xx+ only */
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SMX_ACTION_ENA       (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
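+/*
+ * Illustrative fence sketch: an EOP event that writes a 32-bit sequence
+ * number to 'addr' and interrupts once the write is confirmed
+ * (DATA_SEL=1, INT_SEL=2; assumes the CACHE_FLUSH_AND_INV_EVENT_TS
+ * event define from this header):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ *	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) |
+ *				EVENT_INDEX(5));
+ *	radeon_ring_write(ring, lower_32_bits(addr));
+ *	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) |
+ *				DATA_SEL(1) | INT_SEL(2));
+ *	radeon_ring_write(ring, seq);
+ *	radeon_ring_write(ring, 0);
+ */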
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_OFFSET			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_OFFSET			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+#define		PACKET3_SET_ALU_CONST_OFFSET			0x00030000
+#define		PACKET3_SET_ALU_CONST_END			0x00032000
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_OFFSET			0x0003e380
+#define		PACKET3_SET_BOOL_CONST_END			0x00040000
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_OFFSET			0x0003e200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003e380
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_OFFSET			0x00038000
+#define		PACKET3_SET_RESOURCE_END			0x0003c000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_OFFSET			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003cff0
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_OFFSET			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003e200
+#define	PACKET3_STRMOUT_BASE_UPDATE			0x72 /* r7xx */
+#define	PACKET3_SURFACE_BASE_UPDATE			0x73
+
+#define R_000011_K8_FB_LOCATION                 0x11
+#define R_000012_MC_MISC_UMA_CNTL               0x12
+#define   G_000012_K8_ADDR_EXT(x)               (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX			0x28F8
+#define   	S_0028F8_MC_IND_ADDR(x)                 (((x) & 0x1FF) << 0)
+#define   	C_0028F8_MC_IND_ADDR                    0xFFFFFE00
+#define   	S_0028F8_MC_IND_WR_EN(x)                (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA                        0x28FC
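+/*
+ * MC_INDEX/MC_DATA form an indirect access window: program the target
+ * offset (plus MC_IND_WR_EN for a write) into MC_INDEX, then access
+ * MC_DATA.  Illustrative write sketch using the driver's WREG32()
+ * accessor:
+ *
+ *	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ *	       S_0028F8_MC_IND_WR_EN(1));
+ *	WREG32(R_0028FC_MC_DATA, val);
+ */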
+
+#define	R_008020_GRBM_SOFT_RESET		0x8020
+#define		S_008020_SOFT_RESET_CP(x)		(((x) & 1) << 0)
+#define		S_008020_SOFT_RESET_CB(x)		(((x) & 1) << 1)
+#define		S_008020_SOFT_RESET_CR(x)		(((x) & 1) << 2)
+#define		S_008020_SOFT_RESET_DB(x)		(((x) & 1) << 3)
+#define		S_008020_SOFT_RESET_PA(x)		(((x) & 1) << 5)
+#define		S_008020_SOFT_RESET_SC(x)		(((x) & 1) << 6)
+#define		S_008020_SOFT_RESET_SMX(x)		(((x) & 1) << 7)
+#define		S_008020_SOFT_RESET_SPI(x)		(((x) & 1) << 8)
+#define		S_008020_SOFT_RESET_SH(x)		(((x) & 1) << 9)
+#define		S_008020_SOFT_RESET_SX(x)		(((x) & 1) << 10)
+#define		S_008020_SOFT_RESET_TC(x)		(((x) & 1) << 11)
+#define		S_008020_SOFT_RESET_TA(x)		(((x) & 1) << 12)
+#define		S_008020_SOFT_RESET_VC(x)		(((x) & 1) << 13)
+#define		S_008020_SOFT_RESET_VGT(x)		(((x) & 1) << 14)
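+/*
+ * Illustrative reset pulse (a sketch of the usual idiom: assert the
+ * block's reset bit, read back to post the write, wait, then release):
+ *
+ *	WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
+ *	RREG32(R_008020_GRBM_SOFT_RESET);
+ *	mdelay(15);
+ *	WREG32(R_008020_GRBM_SOFT_RESET, 0);
+ *	RREG32(R_008020_GRBM_SOFT_RESET);
+ */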
+#define	R_008010_GRBM_STATUS			0x8010
+#define		S_008010_CMDFIFO_AVAIL(x)		(((x) & 0x1F) << 0)
+#define		S_008010_CP_RQ_PENDING(x)		(((x) & 1) << 6)
+#define		S_008010_CF_RQ_PENDING(x)		(((x) & 1) << 7)
+#define		S_008010_PF_RQ_PENDING(x)		(((x) & 1) << 8)
+#define		S_008010_GRBM_EE_BUSY(x)		(((x) & 1) << 10)
+#define		S_008010_VC_BUSY(x)			(((x) & 1) << 11)
+#define		S_008010_DB03_CLEAN(x)			(((x) & 1) << 12)
+#define		S_008010_CB03_CLEAN(x)			(((x) & 1) << 13)
+#define		S_008010_VGT_BUSY_NO_DMA(x)		(((x) & 1) << 16)
+#define		S_008010_VGT_BUSY(x)			(((x) & 1) << 17)
+#define		S_008010_TA03_BUSY(x)			(((x) & 1) << 18)
+#define		S_008010_TC_BUSY(x)			(((x) & 1) << 19)
+#define		S_008010_SX_BUSY(x)			(((x) & 1) << 20)
+#define		S_008010_SH_BUSY(x)			(((x) & 1) << 21)
+#define		S_008010_SPI03_BUSY(x)			(((x) & 1) << 22)
+#define		S_008010_SMX_BUSY(x)			(((x) & 1) << 23)
+#define		S_008010_SC_BUSY(x)			(((x) & 1) << 24)
+#define		S_008010_PA_BUSY(x)			(((x) & 1) << 25)
+#define		S_008010_DB03_BUSY(x)			(((x) & 1) << 26)
+#define		S_008010_CR_BUSY(x)			(((x) & 1) << 27)
+#define		S_008010_CP_COHERENCY_BUSY(x)		(((x) & 1) << 28)
+#define		S_008010_CP_BUSY(x)			(((x) & 1) << 29)
+#define		S_008010_CB03_BUSY(x)			(((x) & 1) << 30)
+#define		S_008010_GUI_ACTIVE(x)			(((x) & 1) << 31)
+#define		G_008010_CMDFIFO_AVAIL(x)		(((x) >> 0) & 0x1F)
+#define		G_008010_CP_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_008010_CF_RQ_PENDING(x)		(((x) >> 7) & 1)
+#define		G_008010_PF_RQ_PENDING(x)		(((x) >> 8) & 1)
+#define		G_008010_GRBM_EE_BUSY(x)		(((x) >> 10) & 1)
+#define		G_008010_VC_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008010_DB03_CLEAN(x)			(((x) >> 12) & 1)
+#define		G_008010_CB03_CLEAN(x)			(((x) >> 13) & 1)
+#define		G_008010_TA_BUSY(x)			(((x) >> 14) & 1)
+#define		G_008010_VGT_BUSY_NO_DMA(x)		(((x) >> 16) & 1)
+#define		G_008010_VGT_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008010_TA03_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008010_TC_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008010_SX_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008010_SH_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008010_SPI03_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008010_SMX_BUSY(x)			(((x) >> 23) & 1)
+#define		G_008010_SC_BUSY(x)			(((x) >> 24) & 1)
+#define		G_008010_PA_BUSY(x)			(((x) >> 25) & 1)
+#define		G_008010_DB03_BUSY(x)			(((x) >> 26) & 1)
+#define		G_008010_CR_BUSY(x)			(((x) >> 27) & 1)
+#define		G_008010_CP_COHERENCY_BUSY(x)		(((x) >> 28) & 1)
+#define		G_008010_CP_BUSY(x)			(((x) >> 29) & 1)
+#define		G_008010_CB03_BUSY(x)			(((x) >> 30) & 1)
+#define		G_008010_GUI_ACTIVE(x)			(((x) >> 31) & 1)
+#define	R_008014_GRBM_STATUS2			0x8014
+#define		S_008014_CR_CLEAN(x)			(((x) & 1) << 0)
+#define		S_008014_SMX_CLEAN(x)			(((x) & 1) << 1)
+#define		S_008014_SPI0_BUSY(x)			(((x) & 1) << 8)
+#define		S_008014_SPI1_BUSY(x)			(((x) & 1) << 9)
+#define		S_008014_SPI2_BUSY(x)			(((x) & 1) << 10)
+#define		S_008014_SPI3_BUSY(x)			(((x) & 1) << 11)
+#define		S_008014_TA0_BUSY(x)			(((x) & 1) << 12)
+#define		S_008014_TA1_BUSY(x)			(((x) & 1) << 13)
+#define		S_008014_TA2_BUSY(x)			(((x) & 1) << 14)
+#define		S_008014_TA3_BUSY(x)			(((x) & 1) << 15)
+#define		S_008014_DB0_BUSY(x)			(((x) & 1) << 16)
+#define		S_008014_DB1_BUSY(x)			(((x) & 1) << 17)
+#define		S_008014_DB2_BUSY(x)			(((x) & 1) << 18)
+#define		S_008014_DB3_BUSY(x)			(((x) & 1) << 19)
+#define		S_008014_CB0_BUSY(x)			(((x) & 1) << 20)
+#define		S_008014_CB1_BUSY(x)			(((x) & 1) << 21)
+#define		S_008014_CB2_BUSY(x)			(((x) & 1) << 22)
+#define		S_008014_CB3_BUSY(x)			(((x) & 1) << 23)
+#define		G_008014_CR_CLEAN(x)			(((x) >> 0) & 1)
+#define		G_008014_SMX_CLEAN(x)			(((x) >> 1) & 1)
+#define		G_008014_SPI0_BUSY(x)			(((x) >> 8) & 1)
+#define		G_008014_SPI1_BUSY(x)			(((x) >> 9) & 1)
+#define		G_008014_SPI2_BUSY(x)			(((x) >> 10) & 1)
+#define		G_008014_SPI3_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008014_TA0_BUSY(x)			(((x) >> 12) & 1)
+#define		G_008014_TA1_BUSY(x)			(((x) >> 13) & 1)
+#define		G_008014_TA2_BUSY(x)			(((x) >> 14) & 1)
+#define		G_008014_TA3_BUSY(x)			(((x) >> 15) & 1)
+#define		G_008014_DB0_BUSY(x)			(((x) >> 16) & 1)
+#define		G_008014_DB1_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008014_DB2_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008014_DB3_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008014_CB0_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008014_CB1_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008014_CB2_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008014_CB3_BUSY(x)			(((x) >> 23) & 1)
+#define	R_000E50_SRBM_STATUS				0x0E50
+#define		G_000E50_RLC_RQ_PENDING(x)		(((x) >> 3) & 1)
+#define		G_000E50_RCU_RQ_PENDING(x)		(((x) >> 4) & 1)
+#define		G_000E50_GRBM_RQ_PENDING(x)		(((x) >> 5) & 1)
+#define		G_000E50_HI_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_000E50_IO_EXTERN_SIGNAL(x)		(((x) >> 7) & 1)
+#define		G_000E50_VMC_BUSY(x)			(((x) >> 8) & 1)
+#define		G_000E50_MCB_BUSY(x)			(((x) >> 9) & 1)
+#define		G_000E50_MCDZ_BUSY(x)			(((x) >> 10) & 1)
+#define		G_000E50_MCDY_BUSY(x)			(((x) >> 11) & 1)
+#define		G_000E50_MCDX_BUSY(x)			(((x) >> 12) & 1)
+#define		G_000E50_MCDW_BUSY(x)			(((x) >> 13) & 1)
+#define		G_000E50_SEM_BUSY(x)			(((x) >> 14) & 1)
+#define		G_000E50_RLC_BUSY(x)			(((x) >> 15) & 1)
+#define		G_000E50_IH_BUSY(x)			(((x) >> 17) & 1)
+#define		G_000E50_BIF_BUSY(x)			(((x) >> 29) & 1)
+#define	R_000E60_SRBM_SOFT_RESET			0x0E60
+#define		S_000E60_SOFT_RESET_BIF(x)		(((x) & 1) << 1)
+#define		S_000E60_SOFT_RESET_CG(x)		(((x) & 1) << 2)
+#define		S_000E60_SOFT_RESET_CMC(x)		(((x) & 1) << 3)
+#define		S_000E60_SOFT_RESET_CSC(x)		(((x) & 1) << 4)
+#define		S_000E60_SOFT_RESET_DC(x)		(((x) & 1) << 5)
+#define		S_000E60_SOFT_RESET_GRBM(x)		(((x) & 1) << 8)
+#define		S_000E60_SOFT_RESET_HDP(x)		(((x) & 1) << 9)
+#define		S_000E60_SOFT_RESET_IH(x)		(((x) & 1) << 10)
+#define		S_000E60_SOFT_RESET_MC(x)		(((x) & 1) << 11)
+#define		S_000E60_SOFT_RESET_RLC(x)		(((x) & 1) << 13)
+#define		S_000E60_SOFT_RESET_ROM(x)		(((x) & 1) << 14)
+#define		S_000E60_SOFT_RESET_SEM(x)		(((x) & 1) << 15)
+#define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
+#define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
+
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
+
+#define R_028C04_PA_SC_AA_CONFIG                     0x028C04
+#define   S_028C04_MSAA_NUM_SAMPLES(x)                 (((x) & 0x3) << 0)
+#define   G_028C04_MSAA_NUM_SAMPLES(x)                 (((x) >> 0) & 0x3)
+#define   C_028C04_MSAA_NUM_SAMPLES                    0xFFFFFFFC
+#define   S_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) & 0x1) << 4)
+#define   G_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) >> 4) & 0x1)
+#define   C_028C04_AA_MASK_CENTROID_DTMN               0xFFFFFFEF
+#define   S_028C04_MAX_SAMPLE_DIST(x)                  (((x) & 0xF) << 13)
+#define   G_028C04_MAX_SAMPLE_DIST(x)                  (((x) >> 13) & 0xF)
+#define   C_028C04_MAX_SAMPLE_DIST                     0xFFFE1FFF
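+/*
+ * The S_/G_/C_ macros follow one pattern throughout: S_xxx(v) shifts a
+ * field value into place, G_xxx(reg) extracts it, and C_xxx is the mask
+ * that clears the field.  Illustrative read-modify-write sketch
+ * (assuming MSAA_NUM_SAMPLES encodes log2 of the sample count):
+ *
+ *	u32 tmp = RREG32(R_028C04_PA_SC_AA_CONFIG);
+ *	tmp &= C_028C04_MSAA_NUM_SAMPLES;
+ *	tmp |= S_028C04_MSAA_NUM_SAMPLES(2);
+ *	WREG32(R_028C04_PA_SC_AA_CONFIG, tmp);
+ */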
+#define R_0280E0_CB_COLOR0_FRAG                      0x0280E0
+#define   S_0280E0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280E0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280E0_BASE_256B                           0x00000000
+#define R_0280E4_CB_COLOR1_FRAG                      0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG                      0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG                      0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG                      0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG                      0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG                      0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG                      0x0280FC
+#define R_0280C0_CB_COLOR0_TILE                      0x0280C0
+#define   S_0280C0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280C0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280C0_BASE_256B                           0x00000000
+#define R_0280C4_CB_COLOR1_TILE                      0x0280C4
+#define R_0280C8_CB_COLOR2_TILE                      0x0280C8
+#define R_0280CC_CB_COLOR3_TILE                      0x0280CC
+#define R_0280D0_CB_COLOR4_TILE                      0x0280D0
+#define R_0280D4_CB_COLOR5_TILE                      0x0280D4
+#define R_0280D8_CB_COLOR6_TILE                      0x0280D8
+#define R_0280DC_CB_COLOR7_TILE                      0x0280DC
+#define R_0280A0_CB_COLOR0_INFO                      0x0280A0
+#define   S_0280A0_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_0280A0_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_0280A0_ENDIAN                              0xFFFFFFFC
+#define   S_0280A0_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_0280A0_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_0280A0_FORMAT                              0xFFFFFF03
+#define     V_0280A0_COLOR_INVALID                     0x00000000
+#define     V_0280A0_COLOR_8                           0x00000001
+#define     V_0280A0_COLOR_4_4                         0x00000002
+#define     V_0280A0_COLOR_3_3_2                       0x00000003
+#define     V_0280A0_COLOR_16                          0x00000005
+#define     V_0280A0_COLOR_16_FLOAT                    0x00000006
+#define     V_0280A0_COLOR_8_8                         0x00000007
+#define     V_0280A0_COLOR_5_6_5                       0x00000008
+#define     V_0280A0_COLOR_6_5_5                       0x00000009
+#define     V_0280A0_COLOR_1_5_5_5                     0x0000000A
+#define     V_0280A0_COLOR_4_4_4_4                     0x0000000B
+#define     V_0280A0_COLOR_5_5_5_1                     0x0000000C
+#define     V_0280A0_COLOR_32                          0x0000000D
+#define     V_0280A0_COLOR_32_FLOAT                    0x0000000E
+#define     V_0280A0_COLOR_16_16                       0x0000000F
+#define     V_0280A0_COLOR_16_16_FLOAT                 0x00000010
+#define     V_0280A0_COLOR_8_24                        0x00000011
+#define     V_0280A0_COLOR_8_24_FLOAT                  0x00000012
+#define     V_0280A0_COLOR_24_8                        0x00000013
+#define     V_0280A0_COLOR_24_8_FLOAT                  0x00000014
+#define     V_0280A0_COLOR_10_11_11                    0x00000015
+#define     V_0280A0_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_0280A0_COLOR_11_11_10                    0x00000017
+#define     V_0280A0_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_0280A0_COLOR_2_10_10_10                  0x00000019
+#define     V_0280A0_COLOR_8_8_8_8                     0x0000001A
+#define     V_0280A0_COLOR_10_10_10_2                  0x0000001B
+#define     V_0280A0_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_0280A0_COLOR_32_32                       0x0000001D
+#define     V_0280A0_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_0280A0_COLOR_16_16_16_16                 0x0000001F
+#define     V_0280A0_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_0280A0_COLOR_32_32_32_32                 0x00000022
+#define     V_0280A0_COLOR_32_32_32_32_FLOAT           0x00000023
+#define   S_0280A0_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_0280A0_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_0280A0_ARRAY_MODE                          0xFFFFF0FF
+#define     V_0280A0_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_0280A0_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_0280A0_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_0280A0_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_0280A0_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_0280A0_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_0280A0_NUMBER_TYPE                         0xFFFF8FFF
+#define   S_0280A0_READ_SIZE(x)                        (((x) & 0x1) << 15)
+#define   G_0280A0_READ_SIZE(x)                        (((x) >> 15) & 0x1)
+#define   C_0280A0_READ_SIZE                           0xFFFF7FFF
+#define   S_0280A0_COMP_SWAP(x)                        (((x) & 0x3) << 16)
+#define   G_0280A0_COMP_SWAP(x)                        (((x) >> 16) & 0x3)
+#define   C_0280A0_COMP_SWAP                           0xFFFCFFFF
+#define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)
+#define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)
+#define   C_0280A0_TILE_MODE                           0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE			0
+#define     V_0280A0_CLEAR_ENABLE			1
+#define     V_0280A0_FRAG_ENABLE			2
+#define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)
+#define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)
+#define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF
+#define   S_0280A0_CLEAR_COLOR(x)                      (((x) & 0x1) << 21)
+#define   G_0280A0_CLEAR_COLOR(x)                      (((x) >> 21) & 0x1)
+#define   C_0280A0_CLEAR_COLOR                         0xFFDFFFFF
+#define   S_0280A0_BLEND_BYPASS(x)                     (((x) & 0x1) << 22)
+#define   G_0280A0_BLEND_BYPASS(x)                     (((x) >> 22) & 0x1)
+#define   C_0280A0_BLEND_BYPASS                        0xFFBFFFFF
+#define   S_0280A0_BLEND_FLOAT32(x)                    (((x) & 0x1) << 23)
+#define   G_0280A0_BLEND_FLOAT32(x)                    (((x) >> 23) & 0x1)
+#define   C_0280A0_BLEND_FLOAT32                       0xFF7FFFFF
+#define   S_0280A0_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 24)
+#define   G_0280A0_SIMPLE_FLOAT(x)                     (((x) >> 24) & 0x1)
+#define   C_0280A0_SIMPLE_FLOAT                        0xFEFFFFFF
+#define   S_0280A0_ROUND_MODE(x)                       (((x) & 0x1) << 25)
+#define   G_0280A0_ROUND_MODE(x)                       (((x) >> 25) & 0x1)
+#define   C_0280A0_ROUND_MODE                          0xFDFFFFFF
+#define   S_0280A0_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_0280A0_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_0280A0_TILE_COMPACT                        0xFBFFFFFF
+#define   S_0280A0_SOURCE_FORMAT(x)                    (((x) & 0x1) << 27)
+#define   G_0280A0_SOURCE_FORMAT(x)                    (((x) >> 27) & 0x1)
+#define   C_0280A0_SOURCE_FORMAT                       0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO                      0x0280A4
+#define R_0280A8_CB_COLOR2_INFO                      0x0280A8
+#define R_0280AC_CB_COLOR3_INFO                      0x0280AC
+#define R_0280B0_CB_COLOR4_INFO                      0x0280B0
+#define R_0280B4_CB_COLOR5_INFO                      0x0280B4
+#define R_0280B8_CB_COLOR6_INFO                      0x0280B8
+#define R_0280BC_CB_COLOR7_INFO                      0x0280BC
+#define R_028060_CB_COLOR0_SIZE                      0x028060
+#define   S_028060_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028060_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028060_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028060_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028060_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028060_SLICE_TILE_MAX                      0xC00003FF
+#define R_028064_CB_COLOR1_SIZE                      0x028064
+#define R_028068_CB_COLOR2_SIZE                      0x028068
+#define R_02806C_CB_COLOR3_SIZE                      0x02806C
+#define R_028070_CB_COLOR4_SIZE                      0x028070
+#define R_028074_CB_COLOR5_SIZE                      0x028074
+#define R_028078_CB_COLOR6_SIZE                      0x028078
+#define R_02807C_CB_COLOR7_SIZE                      0x02807C
+#define R_028238_CB_TARGET_MASK                      0x028238
+#define   S_028238_TARGET0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_028238_TARGET0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_028238_TARGET0_ENABLE                      0xFFFFFFF0
+#define   S_028238_TARGET1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_028238_TARGET1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_028238_TARGET1_ENABLE                      0xFFFFFF0F
+#define   S_028238_TARGET2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_028238_TARGET2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_028238_TARGET2_ENABLE                      0xFFFFF0FF
+#define   S_028238_TARGET3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_028238_TARGET3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_028238_TARGET3_ENABLE                      0xFFFF0FFF
+#define   S_028238_TARGET4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_028238_TARGET4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_028238_TARGET4_ENABLE                      0xFFF0FFFF
+#define   S_028238_TARGET5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_028238_TARGET5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_028238_TARGET5_ENABLE                      0xFF0FFFFF
+#define   S_028238_TARGET6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_028238_TARGET6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_028238_TARGET6_ENABLE                      0xF0FFFFFF
+#define   S_028238_TARGET7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_028238_TARGET7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_028238_TARGET7_ENABLE                      0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK                      0x02823C
+#define   S_02823C_OUTPUT0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_02823C_OUTPUT0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_02823C_OUTPUT0_ENABLE                      0xFFFFFFF0
+#define   S_02823C_OUTPUT1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_02823C_OUTPUT1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_02823C_OUTPUT1_ENABLE                      0xFFFFFF0F
+#define   S_02823C_OUTPUT2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_02823C_OUTPUT2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_02823C_OUTPUT2_ENABLE                      0xFFFFF0FF
+#define   S_02823C_OUTPUT3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_02823C_OUTPUT3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_02823C_OUTPUT3_ENABLE                      0xFFFF0FFF
+#define   S_02823C_OUTPUT4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_02823C_OUTPUT4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_02823C_OUTPUT4_ENABLE                      0xFFF0FFFF
+#define   S_02823C_OUTPUT5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_02823C_OUTPUT5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_02823C_OUTPUT5_ENABLE                      0xFF0FFFFF
+#define   S_02823C_OUTPUT6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_02823C_OUTPUT6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_02823C_OUTPUT6_ENABLE                      0xF0FFFFFF
+#define   S_02823C_OUTPUT7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_02823C_OUTPUT7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_02823C_OUTPUT7_ENABLE                      0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN                      0x028AB0
+#define   S_028AB0_STREAMOUT(x)                        (((x) & 0x1) << 0)
+#define   G_028AB0_STREAMOUT(x)                        (((x) >> 0) & 0x1)
+#define   C_028AB0_STREAMOUT                           0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN               0x028B20
+#define   S_028B20_BUFFER_0_EN(x)                      (((x) & 0x1) << 0)
+#define   G_028B20_BUFFER_0_EN(x)                      (((x) >> 0) & 0x1)
+#define   C_028B20_BUFFER_0_EN                         0xFFFFFFFE
+#define   S_028B20_BUFFER_1_EN(x)                      (((x) & 0x1) << 1)
+#define   G_028B20_BUFFER_1_EN(x)                      (((x) >> 1) & 0x1)
+#define   C_028B20_BUFFER_1_EN                         0xFFFFFFFD
+#define   S_028B20_BUFFER_2_EN(x)                      (((x) & 0x1) << 2)
+#define   G_028B20_BUFFER_2_EN(x)                      (((x) >> 2) & 0x1)
+#define   C_028B20_BUFFER_2_EN                         0xFFFFFFFB
+#define   S_028B20_BUFFER_3_EN(x)                      (((x) & 0x1) << 3)
+#define   G_028B20_BUFFER_3_EN(x)                      (((x) >> 3) & 0x1)
+#define   C_028B20_BUFFER_3_EN                         0xFFFFFFF7
+#define   S_028B20_SIZE(x)                             (((x) & 0xFFFFFFFF) << 0)
+#define   G_028B20_SIZE(x)                             (((x) >> 0) & 0xFFFFFFFF)
+#define   C_028B20_SIZE                                0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0             0x038000
+#define   S_038000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_038000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_038000_DIM                                 0xFFFFFFF8
+#define     V_038000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_038000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_038000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_038000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_038000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_038000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_038000_TILE_MODE(x)                        (((x) & 0xF) << 3)
+#define   G_038000_TILE_MODE(x)                        (((x) >> 3) & 0xF)
+#define   C_038000_TILE_MODE                           0xFFFFFF87
+#define     V_038000_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_038000_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_038000_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_038000_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_038000_TILE_TYPE(x)                        (((x) & 0x1) << 7)
+#define   G_038000_TILE_TYPE(x)                        (((x) >> 7) & 0x1)
+#define   C_038000_TILE_TYPE                           0xFFFFFF7F
+#define   S_038000_PITCH(x)                            (((x) & 0x7FF) << 8)
+#define   G_038000_PITCH(x)                            (((x) >> 8) & 0x7FF)
+#define   C_038000_PITCH                               0xFFF800FF
+#define   S_038000_TEX_WIDTH(x)                        (((x) & 0x1FFF) << 19)
+#define   G_038000_TEX_WIDTH(x)                        (((x) >> 19) & 0x1FFF)
+#define   C_038000_TEX_WIDTH                           0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0             0x038004
+#define   S_038004_TEX_HEIGHT(x)                       (((x) & 0x1FFF) << 0)
+#define   G_038004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x1FFF)
+#define   C_038004_TEX_HEIGHT                          0xFFFFE000
+#define   S_038004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 13)
+#define   G_038004_TEX_DEPTH(x)                        (((x) >> 13) & 0x1FFF)
+#define   C_038004_TEX_DEPTH                           0xFC001FFF
+#define   S_038004_DATA_FORMAT(x)                      (((x) & 0x3F) << 26)
+#define   G_038004_DATA_FORMAT(x)                      (((x) >> 26) & 0x3F)
+#define   C_038004_DATA_FORMAT                         0x03FFFFFF
+#define     V_038004_COLOR_INVALID                     0x00000000
+#define     V_038004_COLOR_8                           0x00000001
+#define     V_038004_COLOR_4_4                         0x00000002
+#define     V_038004_COLOR_3_3_2                       0x00000003
+#define     V_038004_COLOR_16                          0x00000005
+#define     V_038004_COLOR_16_FLOAT                    0x00000006
+#define     V_038004_COLOR_8_8                         0x00000007
+#define     V_038004_COLOR_5_6_5                       0x00000008
+#define     V_038004_COLOR_6_5_5                       0x00000009
+#define     V_038004_COLOR_1_5_5_5                     0x0000000A
+#define     V_038004_COLOR_4_4_4_4                     0x0000000B
+#define     V_038004_COLOR_5_5_5_1                     0x0000000C
+#define     V_038004_COLOR_32                          0x0000000D
+#define     V_038004_COLOR_32_FLOAT                    0x0000000E
+#define     V_038004_COLOR_16_16                       0x0000000F
+#define     V_038004_COLOR_16_16_FLOAT                 0x00000010
+#define     V_038004_COLOR_8_24                        0x00000011
+#define     V_038004_COLOR_8_24_FLOAT                  0x00000012
+#define     V_038004_COLOR_24_8                        0x00000013
+#define     V_038004_COLOR_24_8_FLOAT                  0x00000014
+#define     V_038004_COLOR_10_11_11                    0x00000015
+#define     V_038004_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_038004_COLOR_11_11_10                    0x00000017
+#define     V_038004_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_038004_COLOR_2_10_10_10                  0x00000019
+#define     V_038004_COLOR_8_8_8_8                     0x0000001A
+#define     V_038004_COLOR_10_10_10_2                  0x0000001B
+#define     V_038004_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_038004_COLOR_32_32                       0x0000001D
+#define     V_038004_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_038004_COLOR_16_16_16_16                 0x0000001F
+#define     V_038004_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_038004_COLOR_32_32_32_32                 0x00000022
+#define     V_038004_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_038004_FMT_1                             0x00000025
+#define     V_038004_FMT_GB_GR                         0x00000027
+#define     V_038004_FMT_BG_RG                         0x00000028
+#define     V_038004_FMT_32_AS_8                       0x00000029
+#define     V_038004_FMT_32_AS_8_8                     0x0000002A
+#define     V_038004_FMT_5_9_9_9_SHAREDEXP             0x0000002B
+#define     V_038004_FMT_8_8_8                         0x0000002C
+#define     V_038004_FMT_16_16_16                      0x0000002D
+#define     V_038004_FMT_16_16_16_FLOAT                0x0000002E
+#define     V_038004_FMT_32_32_32                      0x0000002F
+#define     V_038004_FMT_32_32_32_FLOAT                0x00000030
+#define     V_038004_FMT_BC1                           0x00000031
+#define     V_038004_FMT_BC2                           0x00000032
+#define     V_038004_FMT_BC3                           0x00000033
+#define     V_038004_FMT_BC4                           0x00000034
+#define     V_038004_FMT_BC5                           0x00000035
+#define     V_038004_FMT_BC6                           0x00000036
+#define     V_038004_FMT_BC7                           0x00000037
+#define     V_038004_FMT_32_AS_32_32_32_32             0x00000038
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0             0x038010
+#define   S_038010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_038010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_038010_FORMAT_COMP_X                       0xFFFFFFFC
+#define   S_038010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_038010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_038010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_038010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_038010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_038010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_038010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_038010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_038010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_038010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_038010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_038010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define   S_038010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_038010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_038010_SRF_MODE_ALL                        0xFFFFFBFF
+#define   S_038010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_038010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_038010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_038010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_038010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_038010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_038010_REQUEST_SIZE(x)                     (((x) & 0x3) << 14)
+#define   G_038010_REQUEST_SIZE(x)                     (((x) >> 14) & 0x3)
+#define   C_038010_REQUEST_SIZE                        0xFFFF3FFF
+#define   S_038010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_038010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_038010_DST_SEL_X                           0xFFF8FFFF
+#define   S_038010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_038010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_038010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_038010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_038010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_038010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_038010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_038010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_038010_DST_SEL_W                           0xF1FFFFFF
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
+#define   S_038010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_038010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_038010_BASE_LEVEL                          0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0             0x038014
+#define   S_038014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_038014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_038014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_038014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_038014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_038014_BASE_ARRAY                          0xFFFE000F
+#define   S_038014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_038014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_038014_LAST_ARRAY                          0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE               0x0288A8
+#define   S_0288A8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288A8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288A8_ITEMSIZE                            0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE                   0x008C44
+#define   S_008C44_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C44_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C44_MEM_SIZE                            0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE              0x0288B0
+#define   S_0288B0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B0_ITEMSIZE                            0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE                  0x008C54
+#define   S_008C54_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C54_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C54_MEM_SIZE                            0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE               0x0288C0
+#define   S_0288C0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C0_ITEMSIZE                            0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE                   0x008C74
+#define   S_008C74_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C74_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C74_MEM_SIZE                            0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE              0x0288B4
+#define   S_0288B4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B4_ITEMSIZE                            0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE                  0x008C5C
+#define   S_008C5C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C5C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C5C_MEM_SIZE                            0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE               0x0288AC
+#define   S_0288AC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288AC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288AC_ITEMSIZE                            0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE                   0x008C4C
+#define   S_008C4C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C4C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C4C_MEM_SIZE                            0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE              0x0288BC
+#define   S_0288BC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288BC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288BC_ITEMSIZE                            0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE                  0x008C6C
+#define   S_008C6C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C6C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C6C_MEM_SIZE                            0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE              0x0288C4
+#define   S_0288C4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C4_ITEMSIZE                            0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE                  0x008C7C
+#define   S_008C7C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C7C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C7C_MEM_SIZE                            0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE              0x0288B8
+#define   S_0288B8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B8_ITEMSIZE                            0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE                  0x008C64
+#define   S_008C64_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C64_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C64_MEM_SIZE                            0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE                 0x0288C8
+#define   S_0288C8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C8_ITEMSIZE                            0xFFFF8000
+#define R_028010_DB_DEPTH_INFO                       0x028010
+#define   S_028010_FORMAT(x)                           (((x) & 0x7) << 0)
+#define   G_028010_FORMAT(x)                           (((x) >> 0) & 0x7)
+#define   C_028010_FORMAT                              0xFFFFFFF8
+#define     V_028010_DEPTH_INVALID                     0x00000000
+#define     V_028010_DEPTH_16                          0x00000001
+#define     V_028010_DEPTH_X8_24                       0x00000002
+#define     V_028010_DEPTH_8_24                        0x00000003
+#define     V_028010_DEPTH_X8_24_FLOAT                 0x00000004
+#define     V_028010_DEPTH_8_24_FLOAT                  0x00000005
+#define     V_028010_DEPTH_32_FLOAT                    0x00000006
+#define     V_028010_DEPTH_X24_8_32_FLOAT              0x00000007
+#define   S_028010_READ_SIZE(x)                        (((x) & 0x1) << 3)
+#define   G_028010_READ_SIZE(x)                        (((x) >> 3) & 0x1)
+#define   C_028010_READ_SIZE                           0xFFFFFFF7
+#define   S_028010_ARRAY_MODE(x)                       (((x) & 0xF) << 15)
+#define   G_028010_ARRAY_MODE(x)                       (((x) >> 15) & 0xF)
+#define   C_028010_ARRAY_MODE                          0xFFF87FFF
+#define     V_028010_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028010_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028010_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 25)
+#define   G_028010_TILE_SURFACE_ENABLE(x)              (((x) >> 25) & 0x1)
+#define   C_028010_TILE_SURFACE_ENABLE                 0xFDFFFFFF
+#define   S_028010_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_028010_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_028010_TILE_COMPACT                        0xFBFFFFFF
+#define   S_028010_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028010_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028010_ZRANGE_PRECISION                    0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE                       0x028000
+#define   S_028000_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028000_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028000_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028000_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028000_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028000_SLICE_TILE_MAX                      0xC00003FF
+#define R_028004_DB_DEPTH_VIEW                       0x028004
+#define   S_028004_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028004_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028004_SLICE_START                         0xFFFFF800
+#define   S_028004_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028004_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028004_SLICE_MAX                           0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 0000000..1a6f6ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,2970 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+/* TODO: Things that still need to be done:
+ *	- surface allocator & initializer: (a bit like scratch regs) should
+ *	  initialize the HDP_ registers on RS600, R600, R700 hw, and anything
+ *	  else related to surfaces
+ *	- WB: write back stuff (handle it a bit like the scratch regs)
+ *	- Vblank: look at Jesse's rework and what we should do
+ *	- r600/r700: gart & cp
+ *	- cs: clean up the cs ioctl to use a bitmap & things like that
+ *	- power management stuff
+ *	- barrier in gart code
+ *	- unmappable vram?
+ *	- TESTING, TESTING, TESTING
+ */
+
+/* Initialization path:
+ *  We expect that acceleration initialization might fail for various
+ *  reasons even though we work hard to make it work on most
+ *  configurations. In order to still have a working userspace in such
+ *  a situation, the init path must succeed up to the memory controller
+ *  initialization point. Failures before this point are considered
+ *  fatal errors. Here is the init callchain:
+ *      radeon_device_init  performs common structure and mutex
+ *                          initialization
+ *      asic_init           sets up the GPU memory layout and performs
+ *                          all one-time initialization (failures in
+ *                          this function are considered fatal)
+ *      asic_startup        sets up GPU acceleration; to follow the
+ *                          guideline, the first thing this function
+ *                          should do is set up the GPU memory
+ *                          controller (only MC setup failures are
+ *                          considered fatal)
+ */
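+/* A minimal sketch of that contract (illustrative pseudo-code; the
+ * generic names mirror the callchain above, not exact symbols):
+ *
+ *	r = radeon_device_init(rdev, ddev, pdev, flags);
+ *	if (r)
+ *		return r;	fatal, no usable device
+ *	r = asic_init(rdev);
+ *	if (r)
+ *		return r;	fatal, one-time init failed
+ *	r = asic_startup(rdev);	failures past MC setup still leave a
+ *				working, unaccelerated userspace
+ */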
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/hashtable.h>
+#include <linux/dma-fence.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+#include <drm/drm_gem.h>
+
+#include "radeon_family.h"
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+/*
+ * Module parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_testing;
+extern int radeon_connector_table;
+extern int radeon_tv;
+extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
+extern int radeon_msi;
+extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
+extern int radeon_dpm;
+extern int radeon_aspm;
+extern int radeon_runtime_pm;
+extern int radeon_hard_reset;
+extern int radeon_vm_size;
+extern int radeon_vm_block_size;
+extern int radeon_deep_color;
+extern int radeon_use_pflipirq;
+extern int radeon_bapm;
+extern int radeon_backlight;
+extern int radeon_auxch;
+extern int radeon_mst;
+extern int radeon_uvd;
+extern int radeon_vce;
+extern int radeon_si_support;
+extern int radeon_cik_support;
+
+/*
+ * Copied from radeon_drv.h so we don't have to include both and have
+ * conflicting symbols.
+ */
+#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
+#define RADEON_USEC_IB_TEST_TIMEOUT		1000000 /* 1s */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
+#define RADEON_IB_POOL_SIZE			16
+#define RADEON_DEBUGFS_MAX_COMPONENTS		32
+#define RADEONFB_CONN_LIMIT			4
+#define RADEON_BIOS_NUM_SCRATCH			8
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX		0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX		1
+#define CAYMAN_RING_TYPE_CP2_INDEX		2
+
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX		3
+/* cayman add a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX		4
+
+/* R600+ */
+#define R600_RING_TYPE_UVD_INDEX		5
+
+/* TN+ */
+#define TN_RING_TYPE_VCE1_INDEX			6
+#define TN_RING_TYPE_VCE2_INDEX			7
+
+/* max number of rings */
+#define RADEON_NUM_RINGS			8
+
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS			4
+
+/* hardcode these limits for now */
+#define RADEON_VA_IB_OFFSET			(1 << 20)
+#define RADEON_VA_RESERVED_SIZE			(8 << 20)
+#define RADEON_IB_VM_MAX_SIZE			(64 << 10)
+
+/* hard reset data */
+#define RADEON_ASIC_RESET_DATA                  0x39d5e86b
+
+/* reset flags */
+#define RADEON_RESET_GFX			(1 << 0)
+#define RADEON_RESET_COMPUTE			(1 << 1)
+#define RADEON_RESET_DMA			(1 << 2)
+#define RADEON_RESET_CP				(1 << 3)
+#define RADEON_RESET_GRBM			(1 << 4)
+#define RADEON_RESET_DMA1			(1 << 5)
+#define RADEON_RESET_RLC			(1 << 6)
+#define RADEON_RESET_SEM			(1 << 7)
+#define RADEON_RESET_IH				(1 << 8)
+#define RADEON_RESET_VMC			(1 << 9)
+#define RADEON_RESET_MC				(1 << 10)
+#define RADEON_RESET_DISPLAY			(1 << 11)
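+/* e.g. a combined gfx + command processor reset request would pass a mask
+ * of (RADEON_RESET_GFX | RADEON_RESET_CP) to the reset code.
+ */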
+
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX			(1 << 0)
+#define RADEON_CG_BLOCK_MC			(1 << 1)
+#define RADEON_CG_BLOCK_SDMA			(1 << 2)
+#define RADEON_CG_BLOCK_UVD			(1 << 3)
+#define RADEON_CG_BLOCK_VCE			(1 << 4)
+#define RADEON_CG_BLOCK_HDP			(1 << 5)
+#define RADEON_CG_BLOCK_BIF			(1 << 6)
+
+/* CG flags */
+#define RADEON_CG_SUPPORT_GFX_MGCG		(1 << 0)
+#define RADEON_CG_SUPPORT_GFX_MGLS		(1 << 1)
+#define RADEON_CG_SUPPORT_GFX_CGCG		(1 << 2)
+#define RADEON_CG_SUPPORT_GFX_CGLS		(1 << 3)
+#define RADEON_CG_SUPPORT_GFX_CGTS		(1 << 4)
+#define RADEON_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
+#define RADEON_CG_SUPPORT_GFX_CP_LS		(1 << 6)
+#define RADEON_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
+#define RADEON_CG_SUPPORT_MC_LS			(1 << 8)
+#define RADEON_CG_SUPPORT_MC_MGCG		(1 << 9)
+#define RADEON_CG_SUPPORT_SDMA_LS		(1 << 10)
+#define RADEON_CG_SUPPORT_SDMA_MGCG		(1 << 11)
+#define RADEON_CG_SUPPORT_BIF_LS		(1 << 12)
+#define RADEON_CG_SUPPORT_UVD_MGCG		(1 << 13)
+#define RADEON_CG_SUPPORT_VCE_MGCG		(1 << 14)
+#define RADEON_CG_SUPPORT_HDP_LS		(1 << 15)
+#define RADEON_CG_SUPPORT_HDP_MGCG		(1 << 16)
+
+/* PG flags */
+#define RADEON_PG_SUPPORT_GFX_PG		(1 << 0)
+#define RADEON_PG_SUPPORT_GFX_SMG		(1 << 1)
+#define RADEON_PG_SUPPORT_GFX_DMG		(1 << 2)
+#define RADEON_PG_SUPPORT_UVD			(1 << 3)
+#define RADEON_PG_SUPPORT_VCE			(1 << 4)
+#define RADEON_PG_SUPPORT_CP			(1 << 5)
+#define RADEON_PG_SUPPORT_GDS			(1 << 6)
+#define RADEON_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
+#define RADEON_PG_SUPPORT_SDMA			(1 << 8)
+#define RADEON_PG_SUPPORT_ACP			(1 << 9)
+#define RADEON_PG_SUPPORT_SAMU			(1 << 10)
+
+/* max cursor sizes (in pixels) */
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+#define CIK_CURSOR_WIDTH 128
+#define CIK_CURSOR_HEIGHT 128
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+	CHIP_ERRATA_R300_CG             = 0x00000001,
+	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
+	CHIP_ERRATA_PLL_DELAY           = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Dummy page
+ */
+struct radeon_dummy_page {
+	uint64_t	entry;
+	struct page	*page;
+	dma_addr_t	addr;
+};
+int radeon_dummy_page_init(struct radeon_device *rdev);
+void radeon_dummy_page_fini(struct radeon_device *rdev);
+
+
+/*
+ * Clocks
+ */
+struct radeon_clock {
+	struct radeon_pll p1pll;
+	struct radeon_pll p2pll;
+	struct radeon_pll dcpll;
+	struct radeon_pll spll;
+	struct radeon_pll mpll;
+	/* 10 kHz units */
+	uint32_t default_mclk;
+	uint32_t default_sclk;
+	uint32_t default_dispclk;
+	uint32_t current_dispclk;
+	uint32_t dp_extclk;
+	uint32_t max_pixel_clock;
+	uint32_t vco_freq;
+};
+
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
+int radeon_pm_late_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+				   u8 clock_type,
+				   u32 clock,
+				   bool strobe_mode,
+				   struct atom_clock_dividers *dividers);
+int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
+					u32 clock,
+					bool strobe_mode,
+					struct atom_mpll_param *mpll_param);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
+					  u16 voltage_level, u8 voltage_type,
+					  u32 *gpio_value, u32 *gpio_mask);
+void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
+					 u32 eng_clock, u32 mem_clock);
+int radeon_atom_get_voltage_step(struct radeon_device *rdev,
+				 u8 voltage_type, u16 *voltage_step);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage);
+int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
+						      u16 *voltage,
+						      u16 leakage_idx);
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+					  u16 *leakage_id);
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+							 u16 *vddc, u16 *vddci,
+							 u16 virtual_voltage_id,
+							 u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+				u16 virtual_voltage_id,
+				u16 *voltage);
+int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
+				      u8 voltage_type,
+				      u16 nominal_voltage,
+				      u16 *true_voltage);
+int radeon_atom_get_min_voltage(struct radeon_device *rdev,
+				u8 voltage_type, u16 *min_voltage);
+int radeon_atom_get_max_voltage(struct radeon_device *rdev,
+				u8 voltage_type, u16 *max_voltage);
+int radeon_atom_get_voltage_table(struct radeon_device *rdev,
+				  u8 voltage_type, u8 voltage_mode,
+				  struct atom_voltage_table *voltage_table);
+bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
+				 u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id);
+void radeon_atom_update_memory_dll(struct radeon_device *rdev,
+				   u32 mem_clock);
+void radeon_atom_set_ac_timing(struct radeon_device *rdev,
+			       u32 mem_clock);
+int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
+				  u8 module_index,
+				  struct atom_mc_reg_table *reg_table);
+int radeon_atom_get_memory_info(struct radeon_device *rdev,
+				u8 module_index, struct atom_memory_info *mem_info);
+int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
+				     bool gddr5, u8 module_index,
+				     struct atom_memory_clock_range_table *mclk_range_table);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage);
+void rs690_pm_info(struct radeon_device *rdev);
+extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+				    unsigned *bankh, unsigned *mtaspect,
+				    unsigned *tile_split);
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+	struct radeon_device		*rdev;
+	uint32_t			scratch_reg;
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint64_t			sync_seq[RADEON_NUM_RINGS];
+	atomic64_t			last_seq;
+	bool				initialized, delayed_irq;
+	struct delayed_work		lockup_work;
+};
+
+struct radeon_fence {
+	struct dma_fence		base;
+
+	struct radeon_device	*rdev;
+	uint64_t		seq;
+	/* RB, DMA, etc. */
+	unsigned		ring;
+	bool			is_vm_update;
+
+	wait_queue_entry_t		fence_wake;
+};
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
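+/* Return whichever of two fences on the same ring signals later (higher
+ * seq); either argument may be NULL.
+ */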
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+						      struct radeon_fence *b)
+{
+	if (!a) {
+		return b;
+	}
+
+	if (!b) {
+		return a;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	if (a->seq > b->seq) {
+		return a;
+	} else {
+		return b;
+	}
+}
+
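+/* Return true if fence a is older (lower seq) than fence b on the same
+ * ring; a NULL a compares as false, a NULL b as true.
+ */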
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+					   struct radeon_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	return a->seq < b->seq;
+}
+
+/*
+ * Tiling registers
+ */
+struct radeon_surface_reg {
+	struct radeon_bo *bo;
+};
+
+#define RADEON_GEM_MAX_SURFACES 8
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*vram;
+	struct dentry			*gtt;
+#endif
+};
+
+struct radeon_bo_list {
+	struct radeon_bo		*robj;
+	struct ttm_validate_buffer	tv;
+	uint64_t			gpu_offset;
+	unsigned			preferred_domains;
+	unsigned			allowed_domains;
+	uint32_t			tiling_flags;
+};
+
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+	uint32_t			flags;
+	struct radeon_fence		*last_pt_update;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct interval_tree_node	it;
+	struct list_head		vm_status;
+
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				initial_domain;
+	struct ttm_place		placements[4];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	u32				flags;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	unsigned			prime_shared_count;
+	/* list of all virtual addresses to which this bo
+	 * is associated
+	 */
+	struct list_head		va;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj		dma_buf_vmap;
+	pid_t				pid;
+
+	struct radeon_mn		*mn;
+	struct list_head		mn_list;
+};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
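+/* e.g. given a struct drm_gem_object *gobj from a GEM ioctl:
+ *	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+ */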
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
+/* sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver,
+ * like the indirect buffer or semaphore, which both have their
+ * own locking.
+ *
+ * The principle is simple: we keep a list of sub allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset + object_size >=
+ * alloc_size; this object then becomes the sub object we return.
+ *
+ * Alignment can't be bigger than page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be holes (all objects on the same
+ * alignment).
+ */
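+/* A minimal sketch of the "room at the end" test described above
+ * (hypothetical helper, not part of the driver):
+ *
+ *	static bool sa_room_at_end(unsigned total_size, unsigned last_offset,
+ *				   unsigned last_size, unsigned alloc_size)
+ *	{
+ *		return total_size - (last_offset + last_size) >= alloc_size;
+ *	}
+ */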
+struct radeon_sa_manager {
+	wait_queue_head_t	wq;
+	struct radeon_bo	*bo;
+	struct list_head	*hole;
+	struct list_head	flist[RADEON_NUM_RINGS];
+	struct list_head	olist;
+	unsigned		size;
+	uint64_t		gpu_addr;
+	void			*cpu_ptr;
+	uint32_t		domain;
+	uint32_t		align;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+	struct list_head		olist;
+	struct list_head		flist;
+	struct radeon_sa_manager	*manager;
+	unsigned			soffset;
+	unsigned			eoffset;
+	struct radeon_fence		*fence;
+};
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
+				int alignment, int initial_domain,
+				u32 flags, bool kernel,
+				struct drm_gem_object **obj);
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+
+/*
+ * Semaphores.
+ */
+struct radeon_semaphore {
+	struct radeon_sa_bo	*sa_bo;
+	signed			waiters;
+	uint64_t		gpu_addr;
+};
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore);
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+				  struct radeon_semaphore *semaphore);
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+				struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence);
+
+/*
+ * Synchronization
+ */
+struct radeon_sync {
+	struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
+	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
+	struct radeon_fence	*last_vm_update;
+};
+
+void radeon_sync_create(struct radeon_sync *sync);
+void radeon_sync_fence(struct radeon_sync *sync,
+		       struct radeon_fence *fence);
+int radeon_sync_resv(struct radeon_device *rdev,
+		     struct radeon_sync *sync,
+		     struct reservation_object *resv,
+		     bool shared);
+int radeon_sync_rings(struct radeon_device *rdev,
+		      struct radeon_sync *sync,
+		      int waiting_ring);
+void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
+		      struct radeon_fence *fence);
+
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
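+/* e.g. RADEON_GPU_PAGE_ALIGN(4097) == 8192: adding the page mask and then
+ * clearing the low bits rounds a size up to the next GPU page boundary.
+ */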
+
+#define RADEON_GART_PAGE_DUMMY  0
+#define RADEON_GART_PAGE_VALID	(1 << 0)
+#define RADEON_GART_PAGE_READ	(1 << 1)
+#define RADEON_GART_PAGE_WRITE	(1 << 2)
+#define RADEON_GART_PAGE_SNOOP	(1 << 3)
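+/* e.g. a fully accessible, cache-snooped system page would be bound with
+ * RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ * RADEON_GART_PAGE_WRITE | RADEON_GART_PAGE_SNOOP
+ */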
+
+struct radeon_gart {
+	dma_addr_t			table_addr;
+	struct radeon_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+	struct page			**pages;
+	uint64_t			*pages_entry;
+	bool				ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr, uint32_t flags);
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+	resource_size_t		aper_size;
+	resource_size_t		aper_base;
+	resource_size_t		agp_base;
+	/* for some chips with <= 32MB of VRAM we need to lie
+	 * about the vram size near the mc fb location */
+	u64			mc_vram_size;
+	u64			visible_vram_size;
+	u64			gtt_size;
+	u64			gtt_start;
+	u64			gtt_end;
+	u64			vram_start;
+	u64			vram_end;
+	unsigned		vram_width;
+	u64			real_vram_size;
+	int			vram_mtrr;
+	bool			vram_is_ddr;
+	bool			igp_sideport_enabled;
+	u64                     gtt_base_align;
+	u64                     mc_mask;
+};
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+	unsigned		num_reg;
+	uint32_t                reg_base;
+	bool			free[32];
+	uint32_t		reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+#define RADEON_MAX_DOORBELLS 1024	/* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
+struct radeon_doorbell {
+	/* doorbell mmio */
+	resource_size_t		base;
+	resource_size_t		size;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
+	DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS);
+};
+
+int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
+void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
+
+/*
+ * IRQS.
+ */
+
+struct radeon_flip_work {
+	struct work_struct		flip_work;
+	struct work_struct		unpin_work;
+	struct radeon_device		*rdev;
+	int				crtc_id;
+	u32				target_vblank;
+	uint64_t			base;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo		*old_rbo;
+	struct dma_fence		*fence;
+	bool				async;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+	u32 hdmi0_status;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 hdmi0_status;
+	u32 hdmi1_status;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int[6];
+	u32 grph_int[6];
+	u32 afmt_status[6];
+};
+
+struct cik_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 disp_int_cont6;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+	struct cik_irq_stat_regs cik;
+};
+
+struct radeon_irq {
+	bool				installed;
+	spinlock_t			lock;
+	atomic_t			ring_int[RADEON_NUM_RINGS];
+	bool				crtc_vblank_int[RADEON_MAX_CRTCS];
+	atomic_t			pflip[RADEON_MAX_CRTCS];
+	wait_queue_head_t		vblank_queue;
+	bool				hpd[RADEON_MAX_HPD_PINS];
+	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
+	union radeon_irq_stat_regs	stat_regs;
+	bool				dpm_thermal;
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+
+/*
+ * CP & rings.
+ */
+
+struct radeon_ib {
+	struct radeon_sa_bo		*sa_bo;
+	uint32_t			length_dw;
+	uint64_t			gpu_addr;
+	uint32_t			*ptr;
+	int				ring;
+	struct radeon_fence		*fence;
+	struct radeon_vm		*vm;
+	bool				is_const_ib;
+	struct radeon_sync		sync;
+};
+
+struct radeon_ring {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr_offs;
+	unsigned		rptr_save_reg;
+	u64			next_rptr_gpu_addr;
+	volatile u32		*next_rptr_cpu_addr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	unsigned		ring_free_dw;
+	int			count_dw;
+	atomic_t		last_rptr;
+	atomic64_t		last_activity;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			nop;
+	u32			idx;
+	u64			last_semaphore_signal_addr;
+	u64			last_semaphore_wait_addr;
+	/* for CIK queues */
+	u32 me;
+	u32 pipe;
+	u32 queue;
+	struct radeon_bo	*mqd_obj;
+	u32 doorbell_index;
+	unsigned		wptr_offs;
+};
+
+struct radeon_mec {
+	struct radeon_bo	*hpd_eop_obj;
+	u64			hpd_eop_gpu_addr;
+	u32 num_pipe;
+	u32 num_mec;
+	u32 num_queue;
+};
+
+/*
+ * VM
+ */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM	16
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define RADEON_VM_PTB_ALIGN_SIZE   32768
+#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
+#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
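+/* e.g. RADEON_VM_PTB_ALIGN(1) == 32768: any PTB size is rounded up to the
+ * next 32K boundary, mirroring RADEON_GPU_PAGE_ALIGN above.
+ */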
+
+#define R600_PTE_VALID		(1 << 0)
+#define R600_PTE_SYSTEM		(1 << 1)
+#define R600_PTE_SNOOPED	(1 << 2)
+#define R600_PTE_READABLE	(1 << 5)
+#define R600_PTE_WRITEABLE	(1 << 6)
+
+/* PTE (Page Table Entry) fragment field for different page sizes */
+#define R600_PTE_FRAG_4KB	(0 << 7)
+#define R600_PTE_FRAG_64KB	(4 << 7)
+#define R600_PTE_FRAG_256KB	(6 << 7)
+
+/* flags that need to be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK	( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+				  R600_PTE_SYSTEM | R600_PTE_VALID )
+
+struct radeon_vm_pt {
+	struct radeon_bo		*bo;
+	uint64_t			addr;
+};
+
+struct radeon_vm_id {
+	unsigned		id;
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct radeon_fence	*flushed_updates;
+	/* last use of vmid */
+	struct radeon_fence	*last_id_use;
+};
+
+struct radeon_vm {
+	struct mutex		mutex;
+
+	struct rb_root_cached	va;
+
+	/* protecting invalidated and freed */
+	spinlock_t		status_lock;
+
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
+	/* BOs freed, but not yet updated in the PT */
+	struct list_head	freed;
+
+	/* BOs cleared in the PT */
+	struct list_head	cleared;
+
+	/* contains the page directory */
+	struct radeon_bo	*page_directory;
+	unsigned		max_pde_used;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_vm_pt	*page_tables;
+
+	struct radeon_bo_va	*ib_bo_va;
+
+	/* for id and flush management per ring */
+	struct radeon_vm_id	ids[RADEON_NUM_RINGS];
+};
+
+struct radeon_vm_manager {
+	struct radeon_fence		*active[RADEON_NUM_VM];
+	uint32_t			max_pfn;
+	/* number of VMIDs */
+	unsigned			nvm;
+	/* vram base address for page table entry  */
+	u64				vram_base_offset;
+	/* is vm enabled? */
+	bool				enabled;
+	/* for hw to save the PD addr on suspend/resume */
+	uint32_t			saved_table_addr[RADEON_NUM_VM];
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+	struct radeon_vm		vm;
+};
+
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	atomic_t		lock;
+	bool                    enabled;
+};
+
+/*
+ * RLC stuff
+ */
+#include "clearstate_defs.h"
+
+struct radeon_rlc {
+	/* for power gating */
+	struct radeon_bo	*save_restore_obj;
+	uint64_t		save_restore_gpu_addr;
+	volatile uint32_t	*sr_ptr;
+	const u32               *reg_list;
+	u32                     reg_list_size;
+	/* for clear state */
+	struct radeon_bo	*clear_state_obj;
+	uint64_t		clear_state_gpu_addr;
+	volatile uint32_t	*cs_ptr;
+	const struct cs_section_def   *cs_data;
+	u32                     clear_state_size;
+	/* for cp tables */
+	struct radeon_bo	*cp_table_obj;
+	uint64_t		cp_table_gpu_addr;
+	volatile uint32_t	*cp_table_ptr;
+	u32                     cp_table_size;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib, bool hdp_flush);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+			bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+			       bool hdp_flush);
+void radeon_ring_undo(struct radeon_ring *ring);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_lockup_update(struct radeon_device *rdev,
+			       struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+		     unsigned rptr_offs, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+
+
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
+/*
+ * CS.
+ */
+struct radeon_cs_chunk {
+	uint32_t		length_dw;
+	uint32_t		*kdata;
+	void __user		*user_ptr;
+};
+
+struct radeon_cs_parser {
+	struct device		*dev;
+	struct radeon_device	*rdev;
+	struct drm_file		*filp;
+	/* chunks */
+	unsigned		nchunks;
+	struct radeon_cs_chunk	*chunks;
+	uint64_t		*chunks_array;
+	/* IB */
+	unsigned		idx;
+	/* relocations */
+	unsigned		nrelocs;
+	struct radeon_bo_list	*relocs;
+	struct radeon_bo_list	*vm_bos;
+	struct list_head	validated;
+	unsigned		dma_reloc_idx;
+	/* indices of various chunks */
+	struct radeon_cs_chunk  *chunk_ib;
+	struct radeon_cs_chunk  *chunk_relocs;
+	struct radeon_cs_chunk  *chunk_flags;
+	struct radeon_cs_chunk  *chunk_const_ib;
+	struct radeon_ib	ib;
+	struct radeon_ib	const_ib;
+	void			*track;
+	unsigned		family;
+	int			parser_error;
+	u32			cs_flags;
+	u32			ring;
+	s32			priority;
+	struct ww_acquire_ctx	ticket;
+};
+
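+/* Fetch dword idx of the current IB, preferring the kernel-side copy of
+ * the chunk data when one exists.
+ */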
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = p->chunk_ib;
+
+	if (ibc->kdata)
+		return ibc->kdata[idx];
+	return p->ib.ptr[idx];
+}
+
+
+struct radeon_cs_packet {
+	unsigned	idx;
+	unsigned	type;
+	unsigned	reg;
+	unsigned	opcode;
+	int		count;
+	unsigned	one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt,
+				      unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+	struct radeon_bo	*wb_obj;
+	volatile uint32_t	*wb;
+	uint64_t		gpu_addr;
+	bool                    enabled;
+	bool                    use_event;
+};
+
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
+#define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
+#define R600_WB_EVENT_OFFSET     3072
+#define CIK_WB_CP1_WPTR_OFFSET     3328
+#define CIK_WB_CP2_WPTR_OFFSET     3584
+#define R600_WB_DMA_RING_TEST_OFFSET 3588
+#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
+
+/**
+ * struct radeon_pm - power management data
+ * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
+ * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
+ * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
+ * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
+ * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
+ * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
+ * @sclk:          	GPU clock MHz (core bandwidth depends on this clock)
+ * @needed_bandwidth:   current bandwidth needs
+ *
+ * It keeps track of various data needed to make power management decisions.
+ * The bandwidth requirement is used to determine the minimum clock of the
+ * GPU and memory. The relation between gpu/memory clock and available
+ * bandwidth is hw dependent (type of memory, bus size, efficiency, ...)
+ */
+
+enum radeon_pm_method {
+	PM_METHOD_PROFILE,
+	PM_METHOD_DYNPM,
+	PM_METHOD_DPM,
+};
+
+enum radeon_dynpm_state {
+	DYNPM_STATE_DISABLED,
+	DYNPM_STATE_MINIMUM,
+	DYNPM_STATE_PAUSED,
+	DYNPM_STATE_ACTIVE,
+	DYNPM_STATE_SUSPENDED,
+};
+enum radeon_dynpm_action {
+	DYNPM_ACTION_NONE,
+	DYNPM_ACTION_MINIMUM,
+	DYNPM_ACTION_DOWNCLOCK,
+	DYNPM_ACTION_UPCLOCK,
+	DYNPM_ACTION_DEFAULT
+};
+
+enum radeon_voltage_type {
+	VOLTAGE_NONE = 0,
+	VOLTAGE_GPIO,
+	VOLTAGE_VDDC,
+	VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+	/* not used for dpm */
+	POWER_STATE_TYPE_DEFAULT,
+	POWER_STATE_TYPE_POWERSAVE,
+	/* user selectable states */
+	POWER_STATE_TYPE_BATTERY,
+	POWER_STATE_TYPE_BALANCED,
+	POWER_STATE_TYPE_PERFORMANCE,
+	/* internal states */
+	POWER_STATE_TYPE_INTERNAL_UVD,
+	POWER_STATE_TYPE_INTERNAL_UVD_SD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
+	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
+	POWER_STATE_TYPE_INTERNAL_BOOT,
+	POWER_STATE_TYPE_INTERNAL_THERMAL,
+	POWER_STATE_TYPE_INTERNAL_ACPI,
+	POWER_STATE_TYPE_INTERNAL_ULV,
+	POWER_STATE_TYPE_INTERNAL_3DPERF,
+};
+
+enum radeon_pm_profile_type {
+	PM_PROFILE_DEFAULT,
+	PM_PROFILE_AUTO,
+	PM_PROFILE_LOW,
+	PM_PROFILE_MID,
+	PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX  1
+#define PM_PROFILE_MID_SH_IDX  2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX  4
+#define PM_PROFILE_MID_MH_IDX  5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX         7
+
+struct radeon_pm_profile {
+	int dpms_off_ps_idx;
+	int dpms_on_ps_idx;
+	int dpms_off_cm_idx;
+	int dpms_on_cm_idx;
+};
+
+enum radeon_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_EXTERNAL,
+	THERMAL_TYPE_EXTERNAL_GPIO,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+	THERMAL_TYPE_CI,
+	THERMAL_TYPE_KV,
+};
+
+struct radeon_voltage {
+	enum radeon_voltage_type type;
+	/* gpio voltage */
+	struct radeon_gpio_rec gpio;
+	u32 delay; /* delay in usec from voltage drop to sclk change */
+	bool active_high; /* voltage drop is active when bit is high */
+	/* VDDC voltage */
+	u8 vddc_id; /* index into vddc voltage table */
+	u8 vddci_id; /* index into vddci voltage table */
+	bool vddci_enabled;
+	/* r6xx+ sw */
+	u16 voltage;
+	/* evergreen+ vddci */
+	u16 vddci;
+};
+
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
+
+struct radeon_pm_clock_info {
+	/* memory clock */
+	u32 mclk;
+	/* engine clock */
+	u32 sclk;
+	/* voltage info */
+	struct radeon_voltage voltage;
+	/* standardized clock flags */
+	u32 flags;
+};
+
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
+struct radeon_power_state {
+	enum radeon_pm_state_type type;
+	struct radeon_pm_clock_info *clock_info;
+	/* number of valid clock modes in this power state */
+	int num_clock_modes;
+	struct radeon_pm_clock_info *default_clock_mode;
+	/* standardized state flags */
+	u32 flags;
+	u32 misc; /* vbios specific flags */
+	u32 misc2; /* vbios specific flags */
+	int pcie_lanes; /* pcie lanes */
+};
+
+/*
+ * Some modes are overclocked by a very small value; accept them.
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
+enum radeon_dpm_auto_throttle_src {
+	RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
+	RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum radeon_dpm_event_src {
+	RADEON_DPM_EVENT_SRC_ANALOG = 0,
+	RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
+	RADEON_DPM_EVENT_SRC_DIGITAL = 2,
+	RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+#define RADEON_MAX_VCE_LEVELS 6
+
+enum radeon_vce_level {
+	RADEON_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	RADEON_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	RADEON_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	RADEON_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+struct radeon_ps {
+	u32 caps; /* vbios flags */
+	u32 class; /* vbios flags */
+	u32 class2; /* vbios flags */
+	/* UVD clocks */
+	u32 vclk;
+	u32 dclk;
+	/* VCE clocks */
+	u32 evclk;
+	u32 ecclk;
+	bool vce_active;
+	enum radeon_vce_level vce_level;
+	/* asic priv */
+	void *ps_priv;
+};
+
+struct radeon_dpm_thermal {
+	/* thermal interrupt work */
+	struct work_struct work;
+	/* low temperature threshold */
+	int                min_temp;
+	/* high temperature threshold */
+	int                max_temp;
+	/* was interrupt low to high or high to low */
+	bool               high_to_low;
+};
+
+enum radeon_clk_action
+{
+	RADEON_SCLK_UP = 1,
+	RADEON_SCLK_DOWN
+};
+
+struct radeon_blacklist_clocks
+{
+	u32 sclk;
+	u32 mclk;
+	enum radeon_clk_action action;
+};
+
+struct radeon_clock_and_voltage_limits {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci;
+};
+
+struct radeon_clock_array {
+	u32 count;
+	u32 *values;
+};
+
+struct radeon_clock_voltage_dependency_entry {
+	u32 clk;
+	u16 v;
+};
+
+struct radeon_clock_voltage_dependency_table {
+	u32 count;
+	struct radeon_clock_voltage_dependency_entry *entries;
+};
+
+union radeon_cac_leakage_entry {
+	struct {
+		u16 vddc;
+		u32 leakage;
+	};
+	struct {
+		u16 vddc1;
+		u16 vddc2;
+		u16 vddc3;
+	};
+};
+
+struct radeon_cac_leakage_table {
+	u32 count;
+	union radeon_cac_leakage_entry *entries;
+};
+
+struct radeon_phase_shedding_limits_entry {
+	u16 voltage;
+	u32 sclk;
+	u32 mclk;
+};
+
+struct radeon_phase_shedding_limits_table {
+	u32 count;
+	struct radeon_phase_shedding_limits_entry *entries;
+};
+
+struct radeon_uvd_clock_voltage_dependency_entry {
+	u32 vclk;
+	u32 dclk;
+	u16 v;
+};
+
+struct radeon_uvd_clock_voltage_dependency_table {
+	u8 count;
+	struct radeon_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_vce_clock_voltage_dependency_entry {
+	u32 ecclk;
+	u32 evclk;
+	u16 v;
+};
+
+struct radeon_vce_clock_voltage_dependency_table {
+	u8 count;
+	struct radeon_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_ppm_table {
+	u8 ppm_design;
+	u16 cpu_core_number;
+	u32 platform_tdp;
+	u32 small_ac_platform_tdp;
+	u32 platform_tdc;
+	u32 small_ac_platform_tdc;
+	u32 apu_tdp;
+	u32 dgpu_tdp;
+	u32 dgpu_ulv_power;
+	u32 tj_max;
+};
+
+struct radeon_cac_tdp_table {
+	u16 tdp;
+	u16 configurable_tdp;
+	u16 tdc;
+	u16 battery_power_limit;
+	u16 small_power_limit;
+	u16 low_cac_leakage;
+	u16 high_cac_leakage;
+	u16 maximum_power_delivery_limit;
+};
+
+struct radeon_dpm_dynamic_state {
+	struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
+	struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
+	struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
+	struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+	struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+	struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+	struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+	struct radeon_clock_array valid_sclk_values;
+	struct radeon_clock_array valid_mclk_values;
+	struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
+	struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
+	u32 mclk_sclk_ratio;
+	u32 sclk_mclk_delta;
+	u16 vddc_vddci_delta;
+	u16 min_vddc_for_pcie_gen2;
+	struct radeon_cac_leakage_table cac_leakage_table;
+	struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
+	struct radeon_ppm_table *ppm_table;
+	struct radeon_cac_tdp_table *cac_tdp_table;
+};
+
+struct radeon_dpm_fan {
+	u16 t_min;
+	u16 t_med;
+	u16 t_high;
+	u16 pwm_min;
+	u16 pwm_med;
+	u16 pwm_high;
+	u8 t_hyst;
+	u32 cycle_delay;
+	u16 t_max;
+	u8 control_mode;
+	u16 default_max_fan_pwm;
+	u16 default_fan_output_sensitivity;
+	u16 fan_output_sensitivity;
+	bool ucode_fan_control;
+};
+
+enum radeon_pcie_gen {
+	RADEON_PCIE_GEN1 = 0,
+	RADEON_PCIE_GEN2 = 1,
+	RADEON_PCIE_GEN3 = 2,
+	RADEON_PCIE_GEN_INVALID = 0xffff
+};
+
+enum radeon_dpm_forced_level {
+	RADEON_DPM_FORCED_LEVEL_AUTO = 0,
+	RADEON_DPM_FORCED_LEVEL_LOW = 1,
+	RADEON_DPM_FORCED_LEVEL_HIGH = 2,
+};
+
+struct radeon_vce_state {
+	/* vce clocks */
+	u32 evclk;
+	u32 ecclk;
+	/* gpu clocks */
+	u32 sclk;
+	u32 mclk;
+	u8 clk_idx;
+	u8 pstate;
+};
+
+struct radeon_dpm {
+	struct radeon_ps        *ps;
+	/* number of valid power states */
+	int                     num_ps;
+	/* current power state that is active */
+	struct radeon_ps        *current_ps;
+	/* requested power state */
+	struct radeon_ps        *requested_ps;
+	/* boot up power state */
+	struct radeon_ps        *boot_ps;
+	/* default uvd power state */
+	struct radeon_ps        *uvd_ps;
+	/* vce requirements */
+	struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
+	enum radeon_vce_level vce_level;
+	enum radeon_pm_state_type state;
+	enum radeon_pm_state_type user_state;
+	u32                     platform_caps;
+	u32                     voltage_response_time;
+	u32                     backbias_response_time;
+	void                    *priv;
+	u32			new_active_crtcs;
+	int			new_active_crtc_count;
+	u32			current_active_crtcs;
+	int			current_active_crtc_count;
+	bool single_display;
+	struct radeon_dpm_dynamic_state dyn_state;
+	struct radeon_dpm_fan fan;
+	u32 tdp_limit;
+	u32 near_tdp_limit;
+	u32 near_tdp_limit_adjusted;
+	u32 sq_ramping_threshold;
+	u32 cac_leakage;
+	u16 tdp_od_limit;
+	u32 tdp_adjustment;
+	u16 load_line_slope;
+	bool power_control;
+	bool ac_power;
+	/* special states active */
+	bool                    thermal_active;
+	bool                    uvd_active;
+	bool                    vce_active;
+	/* thermal handling */
+	struct radeon_dpm_thermal thermal;
+	/* forced levels */
+	enum radeon_dpm_forced_level forced_level;
+	/* track UVD streams */
+	unsigned sd;
+	unsigned hd;
+};
+
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
+void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
+
+struct radeon_pm {
+	struct mutex		mutex;
+	/* write locked while reprogramming mclk */
+	struct rw_semaphore	mclk_lock;
+	u32			active_crtcs;
+	int			active_crtc_count;
+	int			req_vblank;
+	bool			vblank_sync;
+	fixed20_12		max_bandwidth;
+	fixed20_12		igp_sideport_mclk;
+	fixed20_12		igp_system_mclk;
+	fixed20_12		igp_ht_link_clk;
+	fixed20_12		igp_ht_link_width;
+	fixed20_12		k8_bandwidth;
+	fixed20_12		sideport_bandwidth;
+	fixed20_12		ht_bandwidth;
+	fixed20_12		core_bandwidth;
+	fixed20_12		sclk;
+	fixed20_12		mclk;
+	fixed20_12		needed_bandwidth;
+	struct radeon_power_state *power_state;
+	/* number of valid power states */
+	int                     num_power_states;
+	int                     current_power_state_index;
+	int                     current_clock_mode_index;
+	int                     requested_power_state_index;
+	int                     requested_clock_mode_index;
+	int                     default_power_state_index;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u16                     current_vddc;
+	u16                     current_vddci;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	u16                     default_vddc;
+	u16                     default_vddci;
+	struct radeon_i2c_chan *i2c_bus;
+	/* selected pm method */
+	enum radeon_pm_method     pm_method;
+	/* dynpm power management */
+	struct delayed_work	dynpm_idle_work;
+	enum radeon_dynpm_state	dynpm_state;
+	enum radeon_dynpm_action	dynpm_planned_action;
+	unsigned long		dynpm_action_timeout;
+	bool                    dynpm_can_upclock;
+	bool                    dynpm_can_downclock;
+	/* profile-based power management */
+	enum radeon_pm_profile_type profile;
+	int                     profile_index;
+	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+	/* internal thermal controller on rv6xx+ */
+	enum radeon_int_thermal_type int_thermal_type;
+	struct device	        *int_hwmon_dev;
+	/* fan control parameters */
+	bool                    no_fan;
+	u8                      fan_pulses_per_revolution;
+	u8                      fan_min_rpm;
+	u8                      fan_max_rpm;
+	/* dpm */
+	bool                    dpm_enabled;
+	bool                    sysfs_initialized;
+	struct radeon_dpm       dpm;
+};
+
+#define RADEON_PCIE_SPEED_25 1
+#define RADEON_PCIE_SPEED_50 2
+#define RADEON_PCIE_SPEED_80 4
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance);
+/*
+ * UVD
+ */
+#define RADEON_DEFAULT_UVD_HANDLES	10
+#define RADEON_MAX_UVD_HANDLES		30
+#define RADEON_UVD_STACK_SIZE		(200*1024)
+#define RADEON_UVD_HEAP_SIZE		(256*1024)
+#define RADEON_UVD_SESSION_SIZE		(50*1024)
+
+struct radeon_uvd {
+	bool			fw_header_present;
+	struct radeon_bo	*vcpu_bo;
+	void			*cpu_addr;
+	uint64_t		gpu_addr;
+	unsigned		max_handles;
+	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
+	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
+	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
+	struct delayed_work	idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+			     struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+				  unsigned vclk, unsigned dclk,
+				  unsigned vco_min, unsigned vco_max,
+				  unsigned fb_factor, unsigned fb_mask,
+				  unsigned pd_min, unsigned pd_max,
+				  unsigned pd_even,
+				  unsigned *optimal_fb_div,
+				  unsigned *optimal_vclk_div,
+				  unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+                                unsigned cg_upll_func_cntl);
+
+/*
+ * VCE
+ */
+#define RADEON_MAX_VCE_HANDLES	16
+
+struct radeon_vce {
+	struct radeon_bo	*vcpu_bo;
+	uint64_t		gpu_addr;
+	unsigned		fw_version;
+	unsigned		fb_version;
+	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
+	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
+	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
+	struct delayed_work	idle_work;
+	uint32_t		keyselect;
+};
+
+int radeon_vce_init(struct radeon_device *rdev);
+void radeon_vce_fini(struct radeon_device *rdev);
+int radeon_vce_suspend(struct radeon_device *rdev);
+int radeon_vce_resume(struct radeon_device *rdev);
+int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence);
+int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence);
+void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
+void radeon_vce_note_usage(struct radeon_device *rdev);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
+int radeon_vce_cs_parse(struct radeon_cs_parser *p);
+bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait);
+void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_vce_fence_emit(struct radeon_device *rdev,
+			   struct radeon_fence *fence);
+int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+
+struct r600_audio_pin {
+	int			channels;
+	int			rate;
+	int			bits_per_sample;
+	u8			status_bits;
+	u8			category_code;
+	u32			offset;
+	bool			connected;
+	u32			id;
+};
+
+struct r600_audio {
+	bool enabled;
+	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
+	int num_pins;
+	struct radeon_audio_funcs *hdmi_funcs;
+	struct radeon_audio_funcs *dp_funcs;
+	struct radeon_audio_basic_funcs *funcs;
+};
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev, int test_number);
+
+
+/*
+ * Testing
+ */
+void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *cpA,
+			   struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
+
+/*
+ * MMU Notifier
+ */
+#if defined(CONFIG_MMU_NOTIFIER)
+int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
+void radeon_mn_unregister(struct radeon_bo *bo);
+#else
+static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
+{
+	return -ENODEV;
+}
+static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
+#endif
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
+
+/*
+ * ASIC ring specific functions.
+ */
+struct radeon_asic_ring {
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+
+	/* validating and patching of IBs */
+	int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+	int (*cs_parse)(struct radeon_cs_parser *p);
+
+	/* command emission functions */
+	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+	void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
+	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+			       struct radeon_semaphore *semaphore, bool emit_wait);
+	void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
+			 unsigned vm_id, uint64_t pd_addr);
+
+	/* testing functions */
+	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+
+	/* deprecated */
+	void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+};
+
+/*
+ * ASIC specific functions.
+ */
+struct radeon_asic {
+	int (*init)(struct radeon_device *rdev);
+	void (*fini)(struct radeon_device *rdev);
+	int (*resume)(struct radeon_device *rdev);
+	int (*suspend)(struct radeon_device *rdev);
+	void (*vga_set_state)(struct radeon_device *rdev, bool state);
+	int (*asic_reset)(struct radeon_device *rdev, bool hard);
+	/* Flush the HDP cache via MMIO */
+	void (*mmio_hdp_flush)(struct radeon_device *rdev);
+	/* check if 3D engine is idle */
+	bool (*gui_idle)(struct radeon_device *rdev);
+	/* wait for mc_idle */
+	int (*mc_wait_for_idle)(struct radeon_device *rdev);
+	/* get the reference clock */
+	u32 (*get_xclk)(struct radeon_device *rdev);
+	/* get the gpu clock counter */
+	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+	/* get register for info ioctl */
+	int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
+	/* gart */
+	struct {
+		void (*tlb_flush)(struct radeon_device *rdev);
+		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
+		void (*set_page)(struct radeon_device *rdev, unsigned i,
+				 uint64_t entry);
+	} gart;
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		void (*copy_pages)(struct radeon_device *rdev,
+				   struct radeon_ib *ib,
+				   uint64_t pe, uint64_t src,
+				   unsigned count);
+		void (*write_pages)(struct radeon_device *rdev,
+				    struct radeon_ib *ib,
+				    uint64_t pe,
+				    uint64_t addr, unsigned count,
+				    uint32_t incr, uint32_t flags);
+		void (*set_pages)(struct radeon_device *rdev,
+				  struct radeon_ib *ib,
+				  uint64_t pe,
+				  uint64_t addr, unsigned count,
+				  uint32_t incr, uint32_t flags);
+		void (*pad_ib)(struct radeon_ib *ib);
+	} vm;
+	/* ring specific callbacks */
+	const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
+	/* irqs */
+	struct {
+		int (*set)(struct radeon_device *rdev);
+		int (*process)(struct radeon_device *rdev);
+	} irq;
+	/* displays */
+	struct {
+		/* display watermarks */
+		void (*bandwidth_update)(struct radeon_device *rdev);
+		/* get frame count */
+		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+		/* wait for vblank */
+		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+		/* set backlight level */
+		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+		/* get backlight level */
+		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+		/* audio callbacks */
+		void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+		void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
+	} display;
+	/* copy functions for bo handling */
+	struct {
+		struct radeon_fence *(*blit)(struct radeon_device *rdev,
+					     uint64_t src_offset,
+					     uint64_t dst_offset,
+					     unsigned num_gpu_pages,
+					     struct reservation_object *resv);
+		u32 blit_ring_index;
+		struct radeon_fence *(*dma)(struct radeon_device *rdev,
+					    uint64_t src_offset,
+					    uint64_t dst_offset,
+					    unsigned num_gpu_pages,
+					    struct reservation_object *resv);
+		u32 dma_ring_index;
+		/* method used for bo copy */
+		struct radeon_fence *(*copy)(struct radeon_device *rdev,
+					     uint64_t src_offset,
+					     uint64_t dst_offset,
+					     unsigned num_gpu_pages,
+					     struct reservation_object *resv);
+		/* ring used for bo copies */
+		u32 copy_ring_index;
+	} copy;
+	/* surfaces */
+	struct {
+		int (*set_reg)(struct radeon_device *rdev, int reg,
+				       uint32_t tiling_flags, uint32_t pitch,
+				       uint32_t offset, uint32_t obj_size);
+		void (*clear_reg)(struct radeon_device *rdev, int reg);
+	} surface;
+	/* hotplug detect */
+	struct {
+		void (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	} hpd;
+	/* static power management */
+	struct {
+		void (*misc)(struct radeon_device *rdev);
+		void (*prepare)(struct radeon_device *rdev);
+		void (*finish)(struct radeon_device *rdev);
+		void (*init_profile)(struct radeon_device *rdev);
+		void (*get_dynpm_state)(struct radeon_device *rdev);
+		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+		int (*get_pcie_lanes)(struct radeon_device *rdev);
+		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
+		int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
+		int (*get_temperature)(struct radeon_device *rdev);
+	} pm;
+	/* dynamic power management */
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*setup_asic)(struct radeon_device *rdev);
+		int (*enable)(struct radeon_device *rdev);
+		int (*late_enable)(struct radeon_device *rdev);
+		void (*disable)(struct radeon_device *rdev);
+		int (*pre_set_power_state)(struct radeon_device *rdev);
+		int (*set_power_state)(struct radeon_device *rdev);
+		void (*post_set_power_state)(struct radeon_device *rdev);
+		void (*display_configuration_changed)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		u32 (*get_sclk)(struct radeon_device *rdev, bool low);
+		u32 (*get_mclk)(struct radeon_device *rdev, bool low);
+		void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
+		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
+		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
+		bool (*vblank_too_short)(struct radeon_device *rdev);
+		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+		void (*enable_bapm)(struct radeon_device *rdev, bool enable);
+		void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
+		u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
+		int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
+		int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
+		u32 (*get_current_sclk)(struct radeon_device *rdev);
+		u32 (*get_current_mclk)(struct radeon_device *rdev);
+	} dpm;
+	/* pageflipping */
+	struct {
+		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
+		bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
+	} pflip;
+};
+
+/*
+ * Asic structures
+ */
+struct r100_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			hdp_cntl;
+};
+
+struct r300_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			resync_scratch;
+	u32			hdp_cntl;
+};
+
+struct r600_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+	unsigned		active_simds;
+};
+
+struct rv770_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		sx_num_of_sets;
+	unsigned		sc_prim_fifo_size;
+	unsigned		sc_hiz_tile_fifo_size;
+		unsigned		sc_earlyz_tile_fifo_size;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+	unsigned		active_simds;
+};
+
+struct evergreen_asic {
+	unsigned num_ses;
+	unsigned max_pipes;
+	unsigned max_tile_pipes;
+	unsigned max_simds;
+	unsigned max_backends;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_stack_entries;
+	unsigned max_hw_contexts;
+	unsigned max_gs_threads;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned sq_num_cf_insts;
+	unsigned sx_num_of_sets;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+	unsigned tiling_nbanks;
+	unsigned tiling_npipes;
+	unsigned tiling_group_size;
+	unsigned tile_config;
+	unsigned backend_map;
+	unsigned active_simds;
+};
+
+struct cayman_asic {
+	unsigned max_shader_engines;
+	unsigned max_pipes_per_simd;
+	unsigned max_tile_pipes;
+	unsigned max_simds_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_gs_threads;
+	unsigned max_stack_entries;
+	unsigned sx_num_of_sets;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned max_hw_contexts;
+	unsigned sq_num_cf_insts;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_shader_engines;
+	unsigned num_shader_pipes_per_simd;
+	unsigned num_tile_pipes;
+	unsigned num_simds_per_se;
+	unsigned num_backends_per_se;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+	unsigned active_simds;
+};
+
+struct si_asic {
+	unsigned max_shader_engines;
+	unsigned max_tile_pipes;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_gs_threads;
+	unsigned max_hw_contexts;
+	unsigned sc_prim_fifo_size_frontend;
+	unsigned sc_prim_fifo_size_backend;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_tile_pipes;
+	unsigned backend_enable_mask;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+	uint32_t tile_mode_array[32];
+	uint32_t active_cus;
+};
+
+struct cik_asic {
+	unsigned max_shader_engines;
+	unsigned max_tile_pipes;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_gs_threads;
+	unsigned max_hw_contexts;
+	unsigned sc_prim_fifo_size_frontend;
+	unsigned sc_prim_fifo_size_backend;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_tile_pipes;
+	unsigned backend_enable_mask;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+	uint32_t tile_mode_array[32];
+	uint32_t macrotile_mode_array[16];
+	uint32_t active_cus;
+};
+
+union radeon_asic_config {
+	struct r300_asic	r300;
+	struct r100_asic	r100;
+	struct r600_asic	r600;
+	struct rv770_asic	rv770;
+	struct evergreen_asic	evergreen;
+	struct cayman_asic	cayman;
+	struct si_asic		si;
+	struct cik_asic		cik;
+};
+
+/*
+ * ASIC initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp);
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+/* VRAM scratch page for the HDP bug; also used as the default VRAM page */
+struct r600_vram_scratch {
+	struct radeon_bo		*robj;
+	volatile uint32_t		*ptr;
+	u64				gpu_addr;
+};
+
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct radeon_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct radeon_atif {
+	struct radeon_atif_notifications notifications;
+	struct radeon_atif_functions functions;
+	struct radeon_atif_notification_cfg notification_cfg;
+	struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+	bool get_ext_state;
+	bool pcie_perf_req;
+	bool pcie_dev_rdy;
+	bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+	struct radeon_atcs_functions functions;
+};
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+	struct device			*dev;
+	struct drm_device		*ddev;
+	struct pci_dev			*pdev;
+	struct rw_semaphore		exclusive_lock;
+	/* ASIC */
+	union radeon_asic_config	config;
+	enum radeon_family		family;
+	unsigned long			flags;
+	int				usec_timeout;
+	enum radeon_pll_errata		pll_errata;
+	int				num_gb_pipes;
+	int				num_z_pipes;
+	int				disp_priority;
+	/* BIOS */
+	uint8_t				*bios;
+	bool				is_atom_bios;
+	uint16_t			bios_header_start;
+	struct radeon_bo		*stolen_vga_memory;
+	/* Register mmio */
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	spinlock_t mmio_idx_lock;
+	/* protects concurrent SMC based register access */
+	spinlock_t smc_idx_lock;
+	/* protects concurrent PLL register access */
+	spinlock_t pll_idx_lock;
+	/* protects concurrent MC register access */
+	spinlock_t mc_idx_lock;
+	/* protects concurrent PCIE register access */
+	spinlock_t pcie_idx_lock;
+	/* protects concurrent PCIE_PORT register access */
+	spinlock_t pciep_idx_lock;
+	/* protects concurrent PIF register access */
+	spinlock_t pif_idx_lock;
+	/* protects concurrent CG register access */
+	spinlock_t cg_idx_lock;
+	/* protects concurrent UVD register access */
+	spinlock_t uvd_idx_lock;
+	/* protects concurrent RCU register access */
+	spinlock_t rcu_idx_lock;
+	/* protects concurrent DIDT register access */
+	spinlock_t didt_idx_lock;
+	/* protects concurrent ENDPOINT (audio) register access */
+	spinlock_t end_idx_lock;
+	void __iomem			*rmmio;
+	radeon_rreg_t			mc_rreg;
+	radeon_wreg_t			mc_wreg;
+	radeon_rreg_t			pll_rreg;
+	radeon_wreg_t			pll_wreg;
+	uint32_t                        pcie_reg_mask;
+	radeon_rreg_t			pciep_rreg;
+	radeon_wreg_t			pciep_wreg;
+	/* io port */
+	void __iomem                    *rio_mem;
+	resource_size_t			rio_mem_size;
+	struct radeon_clock             clock;
+	struct radeon_mc		mc;
+	struct radeon_gart		gart;
+	struct radeon_mode_info		mode_info;
+	struct radeon_scratch		scratch;
+	struct radeon_doorbell		doorbell;
+	struct radeon_mman		mman;
+	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
+	wait_queue_head_t		fence_queue;
+	u64				fence_context;
+	struct mutex			ring_lock;
+	struct radeon_ring		ring[RADEON_NUM_RINGS];
+	bool				ib_pool_ready;
+	struct radeon_sa_manager	ring_tmp_bo;
+	struct radeon_irq		irq;
+	struct radeon_asic		*asic;
+	struct radeon_gem		gem;
+	struct radeon_pm		pm;
+	struct radeon_uvd		uvd;
+	struct radeon_vce		vce;
+	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
+	struct radeon_wb		wb;
+	struct radeon_dummy_page	dummy_page;
+	bool				shutdown;
+	bool				need_dma32;
+	bool				need_swiotlb;
+	bool				accel_working;
+	bool				fastfb_working; /* IGP feature */
+	bool				needs_reset, in_reset;
+	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
+	const struct firmware *me_fw;	/* all family ME firmware */
+	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+	const struct firmware *mc_fw;	/* NI MC firmware */
+	const struct firmware *ce_fw;	/* SI CE firmware */
+	const struct firmware *mec_fw;	/* CIK MEC firmware */
+	const struct firmware *mec2_fw;	/* KV MEC2 firmware */
+	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
+	const struct firmware *smc_fw;	/* SMC firmware */
+	const struct firmware *uvd_fw;	/* UVD firmware */
+	const struct firmware *vce_fw;	/* VCE firmware */
+	bool new_fw;
+	struct r600_vram_scratch vram_scratch;
+	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct radeon_rlc rlc;
+	struct radeon_mec mec;
+	struct delayed_work hotplug_work;
+	struct work_struct dp_work;
+	struct work_struct audio_work;
+	int num_crtc; /* number of crtcs */
+	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+	bool has_uvd;
+	bool has_vce;
+	struct r600_audio audio; /* audio stuff */
+	struct notifier_block acpi_nb;
+	/* only one userspace can use Hyperz features or CMASK at a time */
+	struct drm_file *hyperz_filp;
+	struct drm_file *cmask_filp;
+	/* i2c buses */
+	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+	/* debugfs */
+	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+	unsigned 		debugfs_count;
+	/* virtual memory */
+	struct radeon_vm_manager	vm_manager;
+	struct mutex			gpu_clock_mutex;
+	/* memory stats */
+	atomic64_t			vram_usage;
+	atomic64_t			gtt_usage;
+	atomic64_t			num_bytes_moved;
+	atomic_t			gpu_reset_counter;
+	/* ACPI interface */
+	struct radeon_atif		atif;
+	struct radeon_atcs		atcs;
+	/* srbm instance registers */
+	struct mutex			srbm_mutex;
+	/* clock, powergating flags */
+	u32 cg_flags;
+	u32 pg_flags;
+
+	struct dev_pm_domain vga_pm_domain;
+	bool have_disp_power_ref;
+	u32 px_quirk_flags;
+
+	/* tracking pinned memory */
+	u64 vram_pin_size;
+	u64 gart_pin_size;
+
+	struct mutex	mn_lock;
+	DECLARE_HASHTABLE(mn_hash, 7);
+};
+
+bool radeon_is_px(struct drm_device *dev);
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
+void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+				    bool always_indirect)
+{
+	/* The mmio size is at least 64kb, which lets the compiler optimize
+	 * the check away for low, constant register offsets. */
+	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+		return readl(((void __iomem *)rdev->rmmio) + reg);
+	else
+		return r100_mm_rreg_slow(rdev, reg);
+}
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+				bool always_indirect)
+{
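+	/* Same fast-path check as r100_mm_rreg(); out-of-range or
+	 * forced-indirect writes take the slow path. */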
+	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+		writel(v, ((void __iomem *)rdev->rmmio) + reg);
+	else
+		r100_mm_wreg_slow(rdev, reg, v);
+}
+
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
+
+/*
+ * Cast helper: returns the radeon fence backing a dma_fence, or NULL
+ * if the fence does not originate from this driver.
+ */
+extern const struct dma_fence_ops radeon_fence_ops;
+
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
+{
+	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
+
+	if (__f->base.ops == &radeon_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",	\
+			    r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
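+/* For a hypothetical field FOO with FOO_SHIFT 8 and FOO_MASK 0xff00:
+ * REG_SET(FOO, 0x12) packs to 0x1200 and REG_GET(FOO, 0x1234) extracts 0x12.
+ */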
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
+#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
+#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
+#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
+#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
+#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
+#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
+#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
+#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
+#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
+#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
+#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
+#define WREG32_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32(reg);			\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32(reg, tmp_);				\
+	} while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define WREG32_PLL_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_PLL(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_PLL(reg, tmp_);				\
+	} while (0)
+#define WREG32_SMC_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_SMC(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_SMC(reg, tmp_);				\
+	} while (0)
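+/* In the *_P() read-modify-write helpers, 'mask' selects the bits to keep,
+ * so callers pass the complement of the field being updated, e.g.
+ * WREG32_P(reg, val, ~SOME_FIELD_MASK) for an illustrative SOME_FIELD_MASK;
+ * WREG32_AND() and WREG32_OR() are wrappers around this convention.
+ */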
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
+
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
+
+/*
+ * Indirect register accessors.
+ * They used to be inlined, but inlining increased code size by ~65 kbytes.
+ * Since each accessor performs a pair of MMIO ops
+ * within a spin_lock_irqsave/spin_unlock_irqrestore region,
+ * the cost of call+ret is almost negligible: MMIO and locking
+ * cost several dozen cycles each at best, while call+ret is ~5 cycles.
+ */
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
+void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
+void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
+void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
+void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
+void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
+void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASICs helpers.
+ */
+#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
+			    (rdev->pdev->device == 0x5969))
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+		(rdev->family == CHIP_RV200) || \
+		(rdev->family == CHIP_RS100) || \
+		(rdev->family == CHIP_RS200) || \
+		(rdev->family == CHIP_RV250) || \
+		(rdev->family == CHIP_RV280) || \
+		(rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
+		(rdev->family == CHIP_RV350) ||			\
+		(rdev->family == CHIP_R350)  ||			\
+		(rdev->family == CHIP_RV380) ||			\
+		(rdev->family == CHIP_R420)  ||			\
+		(rdev->family == CHIP_R423)  ||			\
+		(rdev->family == CHIP_RV410) ||			\
+		(rdev->family == CHIP_RS400) ||			\
+		(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+		(rdev->ddev->pdev->device == 0x9443) || \
+		(rdev->ddev->pdev->device == 0x944B) || \
+		(rdev->ddev->pdev->device == 0x9506) || \
+		(rdev->ddev->pdev->device == 0x9509) || \
+		(rdev->ddev->pdev->device == 0x950F) || \
+		(rdev->ddev->pdev->device == 0x689C) || \
+		(rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
+			    (rdev->family == CHIP_RS690)  ||	\
+			    (rdev->family == CHIP_RS740)  ||	\
+			    (rdev->family >= CHIP_R600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
+#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
+#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
+#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
+#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
+#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+			     (rdev->family == CHIP_MULLINS))
+
+#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
+			      (rdev->ddev->pdev->device == 0x6850) || \
+			      (rdev->ddev->pdev->device == 0x6858) || \
+			      (rdev->ddev->pdev->device == 0x6859) || \
+			      (rdev->ddev->pdev->device == 0x6840) || \
+			      (rdev->ddev->pdev->device == 0x6841) || \
+			      (rdev->ddev->pdev->device == 0x6842) || \
+			      (rdev->ddev->pdev->device == 0x6843))
+
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
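+/* The BIOS image is little-endian: RBIOS16() and RBIOS32() compose wider
+ * values from successive RBIOS8() byte reads. */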
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+	if (ring->count_dw <= 0)
+		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
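+/*
+ * Illustrative sketch only: a typical caller reserves space first, emits
+ * dwords, then commits (PACKET0() comes from the asic-specific headers):
+ *
+ *	r = radeon_ring_lock(rdev, ring, 2);
+ *	radeon_ring_write(ring, PACKET0(reg, 0));
+ *	radeon_ring_write(ring, value);
+ *	radeon_ring_unlock_commit(rdev, ring, false);
+ */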
+
+/*
+ * ASICs macro.
+ */
+#define radeon_init(rdev) (rdev)->asic->init((rdev))
+#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
+#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
+#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
+#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
+#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
+#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
+#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
+#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
+#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
+#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
+#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
+#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
+#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
+#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
+#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
+#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
+#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
+#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
+#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
+#define radeon_dpm_init(rdev) (rdev)->asic->dpm.init((rdev))
+#define radeon_dpm_setup_asic(rdev) (rdev)->asic->dpm.setup_asic((rdev))
+#define radeon_dpm_enable(rdev) (rdev)->asic->dpm.enable((rdev))
+#define radeon_dpm_late_enable(rdev) (rdev)->asic->dpm.late_enable((rdev))
+#define radeon_dpm_disable(rdev) (rdev)->asic->dpm.disable((rdev))
+#define radeon_dpm_pre_set_power_state(rdev) (rdev)->asic->dpm.pre_set_power_state((rdev))
+#define radeon_dpm_set_power_state(rdev) (rdev)->asic->dpm.set_power_state((rdev))
+#define radeon_dpm_post_set_power_state(rdev) (rdev)->asic->dpm.post_set_power_state((rdev))
+#define radeon_dpm_display_configuration_changed(rdev) (rdev)->asic->dpm.display_configuration_changed((rdev))
+#define radeon_dpm_fini(rdev) (rdev)->asic->dpm.fini((rdev))
+#define radeon_dpm_get_sclk(rdev, l) (rdev)->asic->dpm.get_sclk((rdev), (l))
+#define radeon_dpm_get_mclk(rdev, l) (rdev)->asic->dpm.get_mclk((rdev), (l))
+#define radeon_dpm_print_power_state(rdev, ps) (rdev)->asic->dpm.print_power_state((rdev), (ps))
+#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) (rdev)->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
+#define radeon_dpm_force_performance_level(rdev, l) (rdev)->asic->dpm.force_performance_level((rdev), (l))
+#define radeon_dpm_vblank_too_short(rdev) (rdev)->asic->dpm.vblank_too_short((rdev))
+#define radeon_dpm_powergate_uvd(rdev, g) (rdev)->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) (rdev)->asic->dpm.enable_bapm((rdev), (e))
+#define radeon_dpm_get_current_sclk(rdev) (rdev)->asic->dpm.get_current_sclk((rdev))
+#define radeon_dpm_get_current_mclk(rdev) (rdev)->asic->dpm.get_current_mclk((rdev))
+
+/* Common functions */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_pci_config_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
+extern void radeon_agp_disable(struct radeon_device *rdev);
+extern int radeon_modeset_init(struct radeon_device *rdev);
+extern void radeon_modeset_fini(struct radeon_device *rdev);
+extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
+extern void radeon_surface_init(struct radeon_device *rdev);
+extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+				     uint32_t flags);
+extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
+extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+			      bool fbcon, bool freeze);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+					     const u32 *registers,
+					     const u32 array_size);
+
+/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
+					 struct radeon_vm *vm,
+					 struct list_head *head);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring, struct radeon_fence *fence);
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+				    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+			     struct radeon_vm *vm);
+int radeon_vm_bo_update(struct radeon_device *rdev,
+			struct radeon_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t offset,
+			  uint32_t flags);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+		      struct radeon_bo_va *bo_va);
+
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       u8 enable_mask);
+void dce6_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       u8 enable_mask);
+
+/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
+ * r600 cs checking helper
+ */
+unsigned r600_mip_minify(unsigned size, unsigned level);
+bool r600_fmt_is_valid_color(u32 format);
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
+int r600_fmt_get_blocksize(u32 format);
+int r600_fmt_get_nblocksx(u32 format, u32 w);
+int r600_fmt_get_nblocksy(u32 format, u32 h);
+
+/*
+ * r600 functions used by radeon_encoder.c
+ */
+struct radeon_hdmi_acr {
+	u32 clock;
+
+	int n_32khz;
+	int cts_32khz;
+
+	int n_44_1khz;
+	int cts_44_1khz;
+
+	int n_48khz;
+	int cts_48khz;
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+				     u32 tiling_pipe_num,
+				     u32 max_rb_num,
+				     u32 total_max_rb_num,
+				     u32 enabled_rb_mask);
+
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
+extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
+						u8 perf_req, bool advertise);
+extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
+
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_bo_list **cs_reloc,
+				int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+			       uint32_t *vline_start_end,
+			       uint32_t *vline_status);
+
+/* interrupt control register helpers */
+void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
+				      u32 reg, u32 mask,
+				      bool enable, const char *name,
+				      unsigned n);
+
+#include "radeon_object.h"
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 0000000..8d3251a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atpx_dgpu_req_power_for_displays(void);
+#else
+static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; }
+#endif
+
+#define ACPI_AC_CLASS           "ac_adapter"
+
+extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_mask;		/* valid flags mask */
+	u32 flags;		/* flags */
+	u8 command_code;	/* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 pending;		/* pending sbios requests */
+	u8 panel_exp_mode;	/* panel expansion mode */
+	u8 thermal_gfx;		/* thermal state: target gfx controller */
+	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
+	u8 forced_power_gfx;	/* forced power state: target gfx controller */
+	u8 forced_power_state;	/* forced power state: state id */
+	u8 system_power_src;	/* system power source */
+	u8 backlight_level;	/* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK	0x3
+#define ATIF_NOTIFY_NONE	0
+#define ATIF_NOTIFY_81		1
+#define ATIF_NOTIFY_N		2
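+/* The low two bits of the system params flags select how the SBIOS notifies
+ * the driver: no notification, Notify(VGA, 0x81), or Notify(VGA, n) with n
+ * taken from the command_code field (see
+ * radeon_atif_get_notification_params()).
+ */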
+
+struct atcs_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+#define ATCS_VALID_FLAGS_MASK	0x3
+
+struct atcs_pref_req_input {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 client_id;		/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+	u16 valid_flags_mask;	/* valid flags mask */
+	u16 flags;		/* flags */
+	u8 req_type;		/* request type */
+	u8 perf_req;		/* performance request */
+} __packed;
+
+struct atcs_pref_req_output {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u8 ret_val;		/* return value */
+} __packed;
+
+/* Call the ATIF method */
+/**
+ * radeon_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
+		struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atif_arg_elements[2];
+	struct acpi_object_list atif_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atif_arg.count = 2;
+	atif_arg.pointer = &atif_arg_elements[0];
+
+	atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atif_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atif_arg_elements[1].buffer.length = params->length;
+		atif_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atif_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATIF is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(acpi_handle handle,
+		struct radeon_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+	radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+	radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics).  This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(acpi_handle handle,
+		struct radeon_atif_notification_cfg *n)
+{
+	union acpi_object *info;
+	struct atif_system_params params;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&params, 0, sizeof(params));
+	size = min(sizeof(params), size);
+	memcpy(&params, info->buffer.pointer, size);
+
+	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+	params.flags &= params.valid_mask;
+
+	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+		n->enabled = false;
+		n->command_code = 0;
+	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+		n->enabled = true;
+		n->command_code = 0x81;
+	} else {
+		if (size < 11) {
+			err = -EINVAL;
+			goto out;
+		}
+		n->enabled = true;
+		n->command_code = params.command_code;
+	}
+
+out:
+	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+			(n->enabled ? "enabled" : "disabled"),
+			n->command_code);
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns the number of pending SBIOS requests, or a negative error code.
+ */
+static int radeon_atif_get_sbios_requests(acpi_handle handle,
+		struct atif_sbios_requests *req)
+{
+	union acpi_object *info;
+	size_t size;
+	int count = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	if (!info)
+		return -EIO;
+
+	size = *(u16 *)info->buffer.pointer;
+	if (size < 13) {
+		count = -EINVAL;
+		goto out;
+	}
+	memset(req, 0, sizeof(*req));
+
+	size = min(sizeof(*req), size);
+	memcpy(req, info->buffer.pointer, size);
+	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+	count = hweight32(req->pending);
+
+out:
+	kfree(info);
+	return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @event: acpi bus event
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+static int radeon_atif_handler(struct radeon_device *rdev,
+		struct acpi_bus_event *event)
+{
+	struct radeon_atif *atif = &rdev->atif;
+	struct atif_sbios_requests req;
+	acpi_handle handle;
+	int count;
+
+	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+			event->device_class, event->type);
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!atif->notification_cfg.enabled ||
+			event->type != atif->notification_cfg.command_code)
+		/* Not our event */
+		return NOTIFY_DONE;
+
+	/* Check pending SBIOS requests */
+	handle = ACPI_HANDLE(&rdev->pdev->dev);
+	count = radeon_atif_get_sbios_requests(handle, &req);
+
+	if (count <= 0)
+		return NOTIFY_DONE;
+
+	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+		struct radeon_encoder *enc = atif->encoder_for_bl;
+
+		if (enc) {
+			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+					req.backlight_level);
+
+			radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+			if (rdev->is_atom_bios) {
+				struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			} else {
+				struct radeon_encoder_lvds *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			}
+#endif
+		}
+	}
+	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+		if ((rdev->flags & RADEON_IS_PX) &&
+		    radeon_atpx_dgpu_req_power_for_displays()) {
+			pm_runtime_get_sync(rdev->ddev->dev);
+			/* Just fire off a uevent and let userspace tell us what to do */
+			drm_helper_hpd_irq_event(rdev->ddev);
+			pm_runtime_mark_last_busy(rdev->ddev->dev);
+			pm_runtime_put_autosuspend(rdev->ddev->dev);
+		}
+	}
+	/* TODO: check other events */
+
+	/* We've handled the event; stop the notifier chain. The ACPI interface
+	 * overloads ACPI_VIDEO_NOTIFY_PROBE, and we don't want to send that to
+	 * userspace if the event was generated only to signal an SBIOS
+	 * request.
+	 */
+	return NOTIFY_BAD;
+}
+
+/* Call the ATCS method */
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atcs_arg_elements[2];
+	struct acpi_object_list atcs_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atcs_arg.count = 2;
+	atcs_arg.pointer = &atcs_arg_elements[0];
+
+	atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atcs_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atcs_arg_elements[1].buffer.length = params->length;
+		atcs_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atcs_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(acpi_handle handle,
+					struct radeon_atcs *atcs)
+{
+	union acpi_object *info;
+	struct atcs_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+	radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_acpi_is_pcie_performance_request_supported - check ATCS support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
+ * are supported (all asics).
+ * Returns true if supported, false if not.
+ */
+bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev)
+{
+	struct radeon_atcs *atcs = &rdev->atcs;
+
+	if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
+		return true;
+
+	return false;
+}
+
+/**
+ * radeon_acpi_pcie_notify_device_ready - notify the SBIOS the GPU is ready
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Executes the PCIE_DEVICE_READY_NOTIFICATION method
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
+{
+	acpi_handle handle;
+	union acpi_object *info;
+	struct radeon_atcs *atcs = &rdev->atcs;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&rdev->pdev->dev);
+	if (!handle)
+		return -EINVAL;
+
+	if (!atcs->functions.pcie_dev_rdy)
+		return -EINVAL;
+
+	info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+	if (!info)
+		return -EIO;
+
+	kfree(info);
+
+	return 0;
+}
+
+/**
+ * radeon_acpi_pcie_performance_request
+ *
+ * @rdev: radeon_device pointer
+ * @perf_req: requested perf level (pcie gen speed)
+ * @advertise: set advertise caps flag if set
+ *
+ * Executes the PCIE_PERFORMANCE_REQUEST method to
+ * change the pcie gen speed (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
+					 u8 perf_req, bool advertise)
+{
+	acpi_handle handle;
+	union acpi_object *info;
+	struct radeon_atcs *atcs = &rdev->atcs;
+	struct atcs_pref_req_input atcs_input;
+	struct atcs_pref_req_output atcs_output;
+	struct acpi_buffer params;
+	size_t size;
+	u32 retry = 3;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&rdev->pdev->dev);
+	if (!handle)
+		return -EINVAL;
+
+	if (!atcs->functions.pcie_perf_req)
+		return -EINVAL;
+
+	atcs_input.size = sizeof(struct atcs_pref_req_input);
+	/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+	atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);
+	atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
+	atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
+	if (advertise)
+		atcs_input.flags |= ATCS_ADVERTISE_CAPS;
+	atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
+	atcs_input.perf_req = perf_req;
+
+	params.length = sizeof(struct atcs_pref_req_input);
+	params.pointer = &atcs_input;
+
+	while (retry--) {
+		info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+		if (!info)
+			return -EIO;
+
+		memset(&atcs_output, 0, sizeof(atcs_output));
+
+		size = *(u16 *) info->buffer.pointer;
+		if (size < 3) {
+			DRM_INFO("ATCS buffer is too small: %zu\n", size);
+			kfree(info);
+			return -EINVAL;
+		}
+		size = min(sizeof(atcs_output), size);
+
+		memcpy(&atcs_output, info->buffer.pointer, size);
+
+		kfree(info);
+
+		switch (atcs_output.ret_val) {
+		case ATCS_REQUEST_REFUSED:
+		default:
+			return -EINVAL;
+		case ATCS_REQUEST_COMPLETE:
+			return 0;
+		case ATCS_REQUEST_IN_PROGRESS:
+			udelay(10);
+			break;
+		}
+	}
+
+	return 0;
+}
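+/* Illustrative helper (a sketch, not driver code): packing the ATCS client
+ * id used above from bus/device/function numbers.  The encoding matches the
+ * comment in the function body (bits 2-0: func, 7-3: dev, 15-8: bus):
+ *
+ *	static inline u16 atcs_client_id(u8 bus, u8 dev, u8 fn)
+ *	{
+ *		return (bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ *	}
+ *
+ * pdev->devfn already carries the dev/fn byte, which is why the driver can
+ * simply OR it with the bus number shifted left by 8.
+ */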
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: notifier value (unused)
+ * @data: acpi bus event
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int radeon_acpi_event(struct notifier_block *nb,
+			     unsigned long val,
+			     void *data)
+{
+	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG_DRIVER("pm: AC\n");
+		else
+			DRM_DEBUG_DRIVER("pm: DC\n");
+
+		radeon_pm_acpi_event_handler(rdev);
+	}
+
+	/* Check for pending SBIOS requests */
+	return radeon_atif_handler(rdev, entry);
+}
+
+/* Call all ACPI methods here */
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+	acpi_handle handle;
+	struct radeon_atif *atif = &rdev->atif;
+	struct radeon_atcs *atcs = &rdev->atcs;
+	int ret;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&rdev->pdev->dev);
+
+	/* No need to proceed if we're sure that ATIF is not supported */
+	if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+		return 0;
+
+	/* Call the ATCS method */
+	ret = radeon_atcs_verify_interface(handle, atcs);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+	}
+
+	/* Call the ATIF method */
+	ret = radeon_atif_verify_interface(handle, atif);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		goto out;
+	}
+
+	if (atif->notifications.brightness_change) {
+		struct drm_encoder *tmp;
+		struct radeon_encoder *target = NULL;
+
+		/* Find the encoder controlling the brightness */
+		list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+				head) {
+			struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+			    enc->enc_priv) {
+				if (rdev->is_atom_bios) {
+					struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				} else {
+					struct radeon_encoder_lvds *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				}
+			}
+		}
+
+		atif->encoder_for_bl = target;
+	}
+
+	if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround: if the sbios request function is
+		 * present, we have to see how it's configured in the system
+		 * params
+		 */
+		atif->functions.system_params = true;
+	}
+
+	if (atif->functions.system_params) {
+		ret = radeon_atif_get_notification_params(handle,
+				&atif->notification_cfg);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+					ret);
+			/* Disable notification */
+			atif->notification_cfg.enabled = false;
+		}
+	}
+
+out:
+	rdev->acpi_nb.notifier_call = radeon_acpi_event;
+	register_acpi_notifier(&rdev->acpi_nb);
+
+	return ret;
+}
+
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+	unregister_acpi_notifier(&rdev->acpi_nb);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 0000000..35202a4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+struct acpi_bus_event;
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
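+/* All four methods share this calling convention.  A hedged sketch of how a
+ * caller evaluates one of them through ACPICA (mirroring what the
+ * radeon_atif_call/radeon_atcs_call helpers in radeon_acpi.c do):
+ *
+ *	union acpi_object args[2];
+ *	struct acpi_object_list arg = { .count = 2, .pointer = args };
+ *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ *	args[0].type = ACPI_TYPE_INTEGER;	// ARG0: function code
+ *	args[0].integer.value = function;
+ *	args[1].type = ACPI_TYPE_BUFFER;	// ARG1: parameter buffer
+ *	args[1].buffer.length = params ? params->length : 0;
+ *	args[1].buffer.pointer = params ? params->pointer : NULL;
+ *	status = acpi_evaluate_object(handle, "ATCS", &arg, &out);
+ */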
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#       define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED        (1 << 20)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
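+/* Illustrative decode of the flags above (a sketch, not driver code; the
+ * variable names are made up for the example):
+ *
+ *	switch (flags & 0x3) {			// bits 1:0: notification mode
+ *	case 0: use_notify = false; break;	// no Notify used
+ *	case 1: use_notify = true; cmd = 0x81; break;	// Notify(VGA, 0x81)
+ *	case 2: use_notify = true; cmd = notify_cmd; break; // custom 0xd0-0xd9
+ *	}
+ */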
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
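+/* A hedged sketch of the GET_SYSTEM_BIOS_REQUESTS output above as a packed
+ * C structure (illustrative; the driver's own definition, if any, lives
+ * outside this header):
+ *
+ *	struct atif_sbios_requests {
+ *		u16 size;		// structure size, including this field
+ *		u32 pending;		// pending sbios requests
+ *		u8 panel_exp_mode;	// panel expansion mode
+ *		u8 thermal_gfx;		// thermal: target gfx controller
+ *		u8 thermal_state;	// thermal: state id (0: exit state)
+ *		u8 forced_power_gfx;	// forced power: target gfx controller
+ *		u8 forced_power_state;	// forced power: state id
+ *		u8 system_power_src;	// system power source
+ *		u8 backlight_level;	// panel backlight level (0-255)
+ *	} __packed;
+ */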
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume, for general lid
+ * status, use the kernel provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number     } repeated structure
+ * WORD  - device number /
+ */
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+#define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION                 0x15
+/* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of reported external gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * WORD  - flags         \
+ * WORD  - bus number    / repeated structure
+ */
+/* flags */
+#       define ATIF_EXTERNAL_GRAPHICS_PORT                         (1 << 0)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED   (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#       define ATPX_DGPU_CAN_DRIVE_DISPLAYS                        (1 << 12)
+#       define ATPX_MS_HYBRID_GFX_SUPPORTED                        (1 << 14)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                     \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                         /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                            } repeated structure
+ * BYTE  - ddc id                           /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+#       define ATPX_HPD_NONE                                       0
+#       define ATPX_HPD1                                           1
+#       define ATPX_HPD2                                           2
+#       define ATPX_HPD3                                           3
+#       define ATPX_HPD4                                           4
+#       define ATPX_HPD5                                           5
+#       define ATPX_HPD6                                           6
+/* ddc id */
+#       define ATPX_DDC_NONE                                       0
+#       define ATPX_DDC1                                           1
+#       define ATPX_DDC2                                           2
+#       define ATPX_DDC3                                           3
+#       define ATPX_DDC4                                           4
+#       define ATPX_DDC5                                           5
+#       define ATPX_DDC6                                           6
+#       define ATPX_DDC7                                           7
+#       define ATPX_DDC8                                           8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED                   (1 << 0)
+#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
+#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
+#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+#       define ATCS_DOCKED                                         (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST                     0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD  - valid flags mask
+ * WORD  - flags
+ * BYTE  - request type
+ * BYTE  - performance request
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - return value
+ */
+/* flags */
+#       define ATCS_ADVERTISE_CAPS                                 (1 << 0)
+#       define ATCS_WAIT_FOR_COMPLETION                            (1 << 1)
+/* request type */
+#       define ATCS_PCIE_LINK_SPEED                                1
+/* performance request */
+#       define ATCS_REMOVE                                         0
+#       define ATCS_FORCE_LOW_POWER                                1
+#       define ATCS_PERF_LEVEL_1                                   2 /* PCIE Gen 1 */
+#       define ATCS_PERF_LEVEL_2                                   3 /* PCIE Gen 2 */
+#       define ATCS_PERF_LEVEL_3                                   4 /* PCIE Gen 3 */
+/* return value */
+#       define ATCS_REQUEST_REFUSED                                1
+#       define ATCS_REQUEST_COMPLETE                               2
+#       define ATCS_REQUEST_IN_PROGRESS                            3
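+/* Hedged sketches of the ARG1/OUTPUT layouts above as packed structures
+ * (illustrative; the driver's atcs_pref_req_input/output definitions in
+ * radeon_acpi.c are authoritative):
+ *
+ *	struct atcs_pref_req_input {
+ *		u16 size;		// structure size, including this field
+ *		u16 client_id;		// bits 2-0: func, 7-3: dev, 15-8: bus
+ *		u16 valid_flags_mask;
+ *		u16 flags;		// ATCS_ADVERTISE_CAPS, ATCS_WAIT_FOR_COMPLETION
+ *		u8 req_type;		// ATCS_PCIE_LINK_SPEED
+ *		u8 perf_req;		// ATCS_REMOVE .. ATCS_PERF_LEVEL_3
+ *	} __packed;
+ *
+ *	struct atcs_pref_req_output {
+ *		u16 size;		// structure size, including this field
+ *		u8 ret_val;		// ATCS_REQUEST_*
+ *	} __packed;
+ */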
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION               0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH                           0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - number of active lanes
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - number of active lanes
+ */
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 0000000..c77d349
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+#if IS_ENABLED(CONFIG_AGP)
+
+struct radeon_agpmode_quirk {
+	u32 hostbridge_vendor;
+	u32 hostbridge_device;
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 default_mode;
+};
+
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+	/* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+	{ PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_IBM, 0x052f, 1},
+	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x054d, 1},
+	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+		PCI_VENDOR_ID_IBM, 0x0530, 1},
+	/* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+		PCI_VENDOR_ID_IBM, 0x054f, 2},
+	/* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x816b, 2},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8195, 8},
+	/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+	{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_DELL, 0x00e3, 2},
+	/* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_DELL, 0x0149, 1},
+	/* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x0531, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0061, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0064, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x10cf, 0x127f, 1},
+	/* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+	{ 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1787, 0x5960, 4},
+	/* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+	{ PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+		0x17af, 0x2020, 4},
+	/* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+	{ PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+		PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+	/* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+	{ PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+		PCI_VENDOR_ID_ATI, 0x013a, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+	/* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+		0x174b, 0x7149, 4},
+	/* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1462, 0x0380, 4},
+	/* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+	{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8175, 1},
+	{ 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	uint32_t agp_status;
+	int default_mode;
+	bool is_v3;
+	int ret;
+
+	/* Acquire AGP. */
+	ret = drm_agp_acquire(rdev->ddev);
+	if (ret) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_agp_info(rdev->ddev, &info);
+	if (ret) {
+		drm_agp_release(rdev->ddev);
+		DRM_ERROR("Unable to get AGP info: %d\n", ret);
+		return ret;
+	}
+
+	if (rdev->ddev->agp->agp_info.aper_size < 32) {
+		drm_agp_release(rdev->ddev);
+		dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+			"need at least 32M, disabling AGP\n",
+			rdev->ddev->agp->agp_info.aper_size);
+		return -EINVAL;
+	}
+
+	mode.mode = info.mode;
+	/* chips with the agp to pcie bridge don't have the AGP_STATUS register.
+	 * Just use whatever mode the host sets up.
+	 */
+	if (rdev->family <= CHIP_RV350)
+		agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	else
+		agp_status = mode.mode;
+	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+	if (is_v3) {
+		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+	} else {
+		if (agp_status & RADEON_AGP_4X_MODE) {
+			default_mode = 4;
+		} else if (agp_status & RADEON_AGP_2X_MODE) {
+			default_mode = 2;
+		} else {
+			default_mode = 1;
+		}
+	}
+
+	/* Apply AGPMode Quirks */
+	while (p && p->chip_device != 0) {
+		if (info.id_vendor == p->hostbridge_vendor &&
+		    info.id_device == p->hostbridge_device &&
+		    rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			default_mode = p->default_mode;
+		}
+		++p;
+	}
+
+	if (radeon_agpmode > 0) {
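+		/* valid modes are single power-of-two values: 4 or 8 for
+		 * AGPv3, 1/2/4 otherwise; (x & (x - 1)) != 0 rejects values
+		 * with more than one bit set
+		 */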
+		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+		    (radeon_agpmode & (radeon_agpmode - 1))) {
+			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+				  default_mode);
+			radeon_agpmode = default_mode;
+		} else {
+			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+		}
+	} else {
+		radeon_agpmode = default_mode;
+	}
+
+	mode.mode &= ~RADEON_AGP_MODE_MASK;
+	if (is_v3) {
+		switch (radeon_agpmode) {
+		case 8:
+			mode.mode |= RADEON_AGPv3_8X_MODE;
+			break;
+		case 4:
+		default:
+			mode.mode |= RADEON_AGPv3_4X_MODE;
+			break;
+		}
+	} else {
+		switch (radeon_agpmode) {
+		case 4:
+			mode.mode |= RADEON_AGP_4X_MODE;
+			break;
+		case 2:
+			mode.mode |= RADEON_AGP_2X_MODE;
+			break;
+		case 1:
+		default:
+			mode.mode |= RADEON_AGP_1X_MODE;
+			break;
+		}
+	}
+
+	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
+	ret = drm_agp_enable(rdev->ddev, mode);
+	if (ret) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		drm_agp_release(rdev->ddev);
+		return ret;
+	}
+
+	rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
+	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+	rdev->mc.gtt_start = rdev->mc.agp_base;
+	rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+		rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
+
+	/* workaround some hw issues */
+	if (rdev->family < CHIP_R200) {
+		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+	}
+	return 0;
+#else
+	return 0;
+#endif
+}
+
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	int r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+	}
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+		drm_agp_release(rdev->ddev);
+	}
+#endif
+}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 0000000..bc5121d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,2706 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	BUG_ON(1);
+	return 0;
+}
+
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+		  reg, v);
+	BUG_ON(1);
+}
+
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures.  Not all asics have all apertures (all asics).
+ */
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+	rdev->mc_rreg = &radeon_invalid_rreg;
+	rdev->mc_wreg = &radeon_invalid_wreg;
+	rdev->pll_rreg = &radeon_invalid_rreg;
+	rdev->pll_wreg = &radeon_invalid_wreg;
+	rdev->pciep_rreg = &radeon_invalid_rreg;
+	rdev->pciep_wreg = &radeon_invalid_wreg;
+
+	/* Don't change order as we are overriding accessors. */
+	if (rdev->family < CHIP_RV515) {
+		rdev->pcie_reg_mask = 0xff;
+	} else {
+		rdev->pcie_reg_mask = 0x7ff;
+	}
+	/* FIXME: not sure here */
+	if (rdev->family <= CHIP_R580) {
+		rdev->pll_rreg = &r100_pll_rreg;
+		rdev->pll_wreg = &r100_pll_wreg;
+	}
+	if (rdev->family >= CHIP_R420) {
+		rdev->mc_rreg = &r420_mc_rreg;
+		rdev->mc_wreg = &r420_mc_wreg;
+	}
+	if (rdev->family >= CHIP_RV515) {
+		rdev->mc_rreg = &rv515_mc_rreg;
+		rdev->mc_wreg = &rv515_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+		rdev->mc_rreg = &rs400_mc_rreg;
+		rdev->mc_wreg = &rs400_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		rdev->mc_rreg = &rs690_mc_rreg;
+		rdev->mc_wreg = &rs690_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS600) {
+		rdev->mc_rreg = &rs600_mc_rreg;
+		rdev->mc_wreg = &rs600_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+		rdev->mc_rreg = &rs780_mc_rreg;
+		rdev->mc_wreg = &rs780_mc_wreg;
+	}
+
+	if (rdev->family >= CHIP_BONAIRE) {
+		rdev->pciep_rreg = &cik_pciep_rreg;
+		rdev->pciep_wreg = &cik_pciep_wreg;
+	} else if (rdev->family >= CHIP_R600) {
+		rdev->pciep_rreg = &r600_pciep_rreg;
+		rdev->pciep_wreg = &r600_pciep_wreg;
+	}
+}
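+/* These callbacks back the indirect-register macros in radeon.h (a
+ * simplified sketch; the real macros may differ in detail):
+ *
+ *	#define RREG32_MC(reg)		rdev->mc_rreg(rdev, (reg))
+ *	#define WREG32_MC(reg, v)	rdev->mc_wreg(rdev, (reg), (v))
+ */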
+
+static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev,
+						    u32 reg, u32 *val)
+{
+	return -EINVAL;
+}
+
+/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+	rdev->flags &= ~RADEON_IS_AGP;
+	if (rdev->family >= CHIP_R600) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+	} else if (rdev->family >= CHIP_RV515 ||
+			rdev->family == CHIP_RV380 ||
+			rdev->family == CHIP_RV410 ||
+			rdev->family == CHIP_R423) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
+		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	} else {
+		DRM_INFO("Forcing AGP to PCI mode\n");
+		rdev->flags |= RADEON_IS_PCI;
+		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
+		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	}
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+
+static const struct radeon_asic_ring r100_gfx_ring = {
+	.ib_execute = &r100_ring_ib_execute,
+	.emit_fence = &r100_fence_ring_emit,
+	.emit_semaphore = &r100_semaphore_ring_emit,
+	.cs_parse = &r100_cs_parse,
+	.ring_start = &r100_ring_start,
+	.ring_test = &r100_ring_test,
+	.ib_test = &r100_ib_test,
+	.is_lockup = &r100_gpu_is_lockup,
+	.get_rptr = &r100_gfx_get_rptr,
+	.get_wptr = &r100_gfx_get_wptr,
+	.set_wptr = &r100_gfx_set_wptr,
+};
+
+static struct radeon_asic r100_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = NULL,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
+static struct radeon_asic r200_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
+static const struct radeon_asic_ring r300_gfx_ring = {
+	.ib_execute = &r100_ring_ib_execute,
+	.emit_fence = &r300_fence_ring_emit,
+	.emit_semaphore = &r100_semaphore_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ib_test = &r100_ib_test,
+	.is_lockup = &r100_gpu_is_lockup,
+	.get_rptr = &r100_gfx_get_rptr,
+	.get_wptr = &r100_gfx_get_wptr,
+	.set_wptr = &r100_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring rv515_gfx_ring = {
+	.ib_execute = &r100_ring_ib_execute,
+	.emit_fence = &r300_fence_ring_emit,
+	.emit_semaphore = &r100_semaphore_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.ring_start = &rv515_ring_start,
+	.ring_test = &r100_ring_test,
+	.ib_test = &r100_ib_test,
+	.is_lockup = &r100_gpu_is_lockup,
+	.get_rptr = &r100_gfx_get_rptr,
+	.get_wptr = &r100_gfx_get_wptr,
+	.set_wptr = &r100_gfx_set_wptr,
+};
+
+static struct radeon_asic r300_asic = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
+static struct radeon_asic r300_asic_pcie = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
+static struct radeon_asic r420_asic = {
+	.init = &r420_init,
+	.fini = &r420_fini,
+	.suspend = &r420_suspend,
+	.resume = &r420_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
+static struct radeon_asic rs400_asic = {
+	.init = &rs400_init,
+	.fini = &rs400_fini,
+	.suspend = &rs400_suspend,
+	.resume = &rs400_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &r100_page_flip,
+		.page_flip_pending = &r100_page_flip_pending,
+	},
+};
+
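+/* RS600 IGP: AtomBIOS clocks and AVIVO display; its IRQ/HPD helpers are reused by the families below. */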
+static struct radeon_asic rs600_asic = {
+	.init = &rs600_init,
+	.fini = &rs600_fini,
+	.suspend = &rs600_suspend,
+	.resume = &rs600_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rs600_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs600_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
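+/* RS690/RS740 IGPs: RS400-style GART with RS600 IRQ/HPD; copies default to r200_copy_dma on the GFX ring. */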
+static struct radeon_asic rs690_asic = {
+	.init = &rs690_init,
+	.fini = &rs690_fini,
+	.suspend = &rs690_suspend,
+	.resume = &rs690_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs690_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r200_copy_dma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
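+/* RV515: RV370-style PCIE GART and its own gfx ring table. */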
+static struct radeon_asic rv515_asic = {
+	.init = &rv515_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &rv515_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
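+/* R520/RV530/RV560/RV570/R580: RV515 callbacks apart from init, resume and MC idle. */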
+static struct radeon_asic r520_asic = {
+	.init = &r520_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &r520_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.mmio_hdp_flush = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r520_mc_wait_for_idle,
+	.get_allowed_info_register = radeon_invalid_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
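+/* R600 ring tables: the GFX CP and the DMA engine each get their own callbacks. */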
+static const struct radeon_asic_ring r600_gfx_ring = {
+	.ib_execute = &r600_ring_ib_execute,
+	.emit_fence = &r600_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = &r600_cs_parse,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &r600_gfx_is_lockup,
+	.get_rptr = &r600_gfx_get_rptr,
+	.get_wptr = &r600_gfx_get_wptr,
+	.set_wptr = &r600_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring r600_dma_ring = {
+	.ib_execute = &r600_dma_ring_ib_execute,
+	.emit_fence = &r600_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = &r600_dma_cs_parse,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &r600_dma_is_lockup,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr,
+};
+
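+/* R600: GFX and DMA rings only; UVD is not wired up on this chip. */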
+static struct radeon_asic r600_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = r600_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_cpdma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.get_temperature = &rv6xx_get_temp,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
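+/* UVD v1.0 ring callbacks used by the RV6xx family. */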
+static const struct radeon_asic_ring rv6xx_uvd_ring = {
+	.ib_execute = &uvd_v1_0_ib_execute,
+	.emit_fence = &uvd_v1_0_fence_emit,
+	.emit_semaphore = &uvd_v1_0_semaphore_emit,
+	.cs_parse = &radeon_uvd_cs_parse,
+	.ring_test = &uvd_v1_0_ring_test,
+	.ib_test = &uvd_v1_0_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &uvd_v1_0_get_rptr,
+	.get_wptr = &uvd_v1_0_get_wptr,
+	.set_wptr = &uvd_v1_0_set_wptr,
+};
+
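+/* RV610-RV670: r600 callbacks plus a UVD ring and rv6xx dpm support. */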
+static struct radeon_asic rv6xx_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = r600_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_cpdma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.get_temperature = &rv6xx_get_temp,
+		.set_uvd_clocks = &r600_set_uvd_clocks,
+	},
+	.dpm = {
+		.init = &rv6xx_dpm_init,
+		.setup_asic = &rv6xx_setup_asic,
+		.enable = &rv6xx_dpm_enable,
+		.late_enable = &r600_dpm_late_enable,
+		.disable = &rv6xx_dpm_disable,
+		.pre_set_power_state = &r600_dpm_pre_set_power_state,
+		.set_power_state = &rv6xx_dpm_set_power_state,
+		.post_set_power_state = &r600_dpm_post_set_power_state,
+		.display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
+		.fini = &rv6xx_dpm_fini,
+		.get_sclk = &rv6xx_dpm_get_sclk,
+		.get_mclk = &rv6xx_dpm_get_mclk,
+		.print_power_state = &rv6xx_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rv6xx_dpm_force_performance_level,
+		.get_current_sclk = &rv6xx_dpm_get_current_sclk,
+		.get_current_mclk = &rv6xx_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
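+/* RS780/RS880 IGPs: memory clock and PCIe lane hooks are NULL; power management uses rs780 dpm. */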
+static struct radeon_asic rs780_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = r600_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs690_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_cpdma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &rs780_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.get_temperature = &rv6xx_get_temp,
+		.set_uvd_clocks = &r600_set_uvd_clocks,
+	},
+	.dpm = {
+		.init = &rs780_dpm_init,
+		.setup_asic = &rs780_dpm_setup_asic,
+		.enable = &rs780_dpm_enable,
+		.late_enable = &r600_dpm_late_enable,
+		.disable = &rs780_dpm_disable,
+		.pre_set_power_state = &r600_dpm_pre_set_power_state,
+		.set_power_state = &rs780_dpm_set_power_state,
+		.post_set_power_state = &r600_dpm_post_set_power_state,
+		.display_configuration_changed = &rs780_dpm_display_configuration_changed,
+		.fini = &rs780_dpm_fini,
+		.get_sclk = &rs780_dpm_get_sclk,
+		.get_mclk = &rs780_dpm_get_mclk,
+		.print_power_state = &rs780_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rs780_dpm_force_performance_level,
+		.get_current_sclk = &rs780_dpm_get_current_sclk,
+		.get_current_mclk = &rs780_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &rs600_page_flip,
+		.page_flip_pending = &rs600_page_flip_pending,
+	},
+};
+
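+/* RV770 UVD ring: v1.0 engine callbacks with v2.2 fence/semaphore emission. */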
+static const struct radeon_asic_ring rv770_uvd_ring = {
+	.ib_execute = &uvd_v1_0_ib_execute,
+	.emit_fence = &uvd_v2_2_fence_emit,
+	.emit_semaphore = &uvd_v2_2_semaphore_emit,
+	.cs_parse = &radeon_uvd_cs_parse,
+	.ring_test = &uvd_v1_0_ring_test,
+	.ib_test = &uvd_v1_0_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &uvd_v1_0_get_rptr,
+	.get_wptr = &uvd_v1_0_get_wptr,
+	.set_wptr = &uvd_v1_0_set_wptr,
+};
+
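+/* RV770/RV730/RV710/RV740: default copies move to the DMA ring via rv770_copy_dma. */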
+static struct radeon_asic rv770_asic = {
+	.init = &rv770_init,
+	.fini = &rv770_fini,
+	.suspend = &rv770_suspend,
+	.resume = &rv770_resume,
+	.asic_reset = &r600_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = r600_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &rv770_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &rv770_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rv770_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+		.set_uvd_clocks = &rv770_set_uvd_clocks,
+		.get_temperature = &rv770_get_temp,
+	},
+	.dpm = {
+		.init = &rv770_dpm_init,
+		.setup_asic = &rv770_dpm_setup_asic,
+		.enable = &rv770_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
+		.disable = &rv770_dpm_disable,
+		.pre_set_power_state = &r600_dpm_pre_set_power_state,
+		.set_power_state = &rv770_dpm_set_power_state,
+		.post_set_power_state = &r600_dpm_post_set_power_state,
+		.display_configuration_changed = &rv770_dpm_display_configuration_changed,
+		.fini = &rv770_dpm_fini,
+		.get_sclk = &rv770_dpm_get_sclk,
+		.get_mclk = &rv770_dpm_get_mclk,
+		.print_power_state = &rv770_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rv770_dpm_force_performance_level,
+		.vblank_too_short = &rv770_dpm_vblank_too_short,
+		.get_current_sclk = &rv770_dpm_get_current_sclk,
+		.get_current_mclk = &rv770_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &rv770_page_flip,
+		.page_flip_pending = &rv770_page_flip_pending,
+	},
+};
+
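+/* Evergreen rings: r600 test/emit helpers with evergreen CS parsing and lockup checks. */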
+static const struct radeon_asic_ring evergreen_gfx_ring = {
+	.ib_execute = &evergreen_ring_ib_execute,
+	.emit_fence = &r600_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &evergreen_gfx_is_lockup,
+	.get_rptr = &r600_gfx_get_rptr,
+	.get_wptr = &r600_gfx_get_wptr,
+	.set_wptr = &r600_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring evergreen_dma_ring = {
+	.ib_execute = &evergreen_dma_ring_ib_execute,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = &evergreen_dma_cs_parse,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &evergreen_dma_is_lockup,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr,
+};
+
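+/* Evergreen dGPUs (Cedar through Hemlock): DCE4 display with up to six CRTCs. */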
+static struct radeon_asic evergreen_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = evergreen_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+		.get_temperature = &evergreen_get_temp,
+	},
+	.dpm = {
+		.init = &cypress_dpm_init,
+		.setup_asic = &cypress_dpm_setup_asic,
+		.enable = &cypress_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
+		.disable = &cypress_dpm_disable,
+		.pre_set_power_state = &r600_dpm_pre_set_power_state,
+		.set_power_state = &cypress_dpm_set_power_state,
+		.post_set_power_state = &r600_dpm_post_set_power_state,
+		.display_configuration_changed = &cypress_dpm_display_configuration_changed,
+		.fini = &cypress_dpm_fini,
+		.get_sclk = &rv770_dpm_get_sclk,
+		.get_mclk = &rv770_dpm_get_mclk,
+		.print_power_state = &rv770_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rv770_dpm_force_performance_level,
+		.vblank_too_short = &cypress_dpm_vblank_too_short,
+		.get_current_sclk = &rv770_dpm_get_current_sclk,
+		.get_current_mclk = &rv770_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
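+/* Palm/Sumo/Sumo2 APUs: evergreen callbacks without memory clock or PCIe lane control. */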
+static struct radeon_asic sumo_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = evergreen_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &sumo_set_uvd_clocks,
+		.get_temperature = &sumo_get_temp,
+	},
+	.dpm = {
+		.init = &sumo_dpm_init,
+		.setup_asic = &sumo_dpm_setup_asic,
+		.enable = &sumo_dpm_enable,
+		.late_enable = &sumo_dpm_late_enable,
+		.disable = &sumo_dpm_disable,
+		.pre_set_power_state = &sumo_dpm_pre_set_power_state,
+		.set_power_state = &sumo_dpm_set_power_state,
+		.post_set_power_state = &sumo_dpm_post_set_power_state,
+		.display_configuration_changed = &sumo_dpm_display_configuration_changed,
+		.fini = &sumo_dpm_fini,
+		.get_sclk = &sumo_dpm_get_sclk,
+		.get_mclk = &sumo_dpm_get_mclk,
+		.print_power_state = &sumo_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &sumo_dpm_force_performance_level,
+		.get_current_sclk = &sumo_dpm_get_current_sclk,
+		.get_current_mclk = &sumo_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
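+/* Barts/Turks/Caicos: evergreen callbacks with btc power profiles and dpm. */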
+static struct radeon_asic btc_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = evergreen_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+		.get_temperature = &evergreen_get_temp,
+	},
+	.dpm = {
+		.init = &btc_dpm_init,
+		.setup_asic = &btc_dpm_setup_asic,
+		.enable = &btc_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
+		.disable = &btc_dpm_disable,
+		.pre_set_power_state = &btc_dpm_pre_set_power_state,
+		.set_power_state = &btc_dpm_set_power_state,
+		.post_set_power_state = &btc_dpm_post_set_power_state,
+		.display_configuration_changed = &cypress_dpm_display_configuration_changed,
+		.fini = &btc_dpm_fini,
+		.get_sclk = &btc_dpm_get_sclk,
+		.get_mclk = &btc_dpm_get_mclk,
+		.print_power_state = &rv770_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rv770_dpm_force_performance_level,
+		.vblank_too_short = &btc_dpm_vblank_too_short,
+		.get_current_sclk = &btc_dpm_get_current_sclk,
+		.get_current_mclk = &btc_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
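+/* Cayman rings add ib_parse and per-ring vm_flush for the GPU VM support below. */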
+static const struct radeon_asic_ring cayman_gfx_ring = {
+	.ib_execute = &cayman_ring_ib_execute,
+	.ib_parse = &evergreen_ib_parse,
+	.emit_fence = &cayman_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &cayman_gfx_is_lockup,
+	.vm_flush = &cayman_vm_flush,
+	.get_rptr = &cayman_gfx_get_rptr,
+	.get_wptr = &cayman_gfx_get_wptr,
+	.set_wptr = &cayman_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring cayman_dma_ring = {
+	.ib_execute = &cayman_dma_ring_ib_execute,
+	.ib_parse = &evergreen_dma_ib_parse,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = &evergreen_dma_cs_parse,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &cayman_dma_is_lockup,
+	.vm_flush = &cayman_dma_vm_flush,
+	.get_rptr = &cayman_dma_get_rptr,
+	.get_wptr = &cayman_dma_get_wptr,
+	.set_wptr = &cayman_dma_set_wptr,
+};
+
+static const struct radeon_asic_ring cayman_uvd_ring = {
+	.ib_execute = &uvd_v1_0_ib_execute,
+	.emit_fence = &uvd_v2_2_fence_emit,
+	.emit_semaphore = &uvd_v3_1_semaphore_emit,
+	.cs_parse = &radeon_uvd_cs_parse,
+	.ring_test = &uvd_v1_0_ring_test,
+	.ib_test = &uvd_v1_0_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &uvd_v1_0_get_rptr,
+	.get_wptr = &uvd_v1_0_get_wptr,
+	.set_wptr = &uvd_v1_0_set_wptr,
+};
+
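+/* Cayman: adds the .vm block and runs multiple CP (CP1/CP2) and DMA rings. */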
+static struct radeon_asic cayman_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = cayman_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+		.get_temperature = &evergreen_get_temp,
+	},
+	.dpm = {
+		.init = &ni_dpm_init,
+		.setup_asic = &ni_dpm_setup_asic,
+		.enable = &ni_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
+		.disable = &ni_dpm_disable,
+		.pre_set_power_state = &ni_dpm_pre_set_power_state,
+		.set_power_state = &ni_dpm_set_power_state,
+		.post_set_power_state = &ni_dpm_post_set_power_state,
+		.display_configuration_changed = &cypress_dpm_display_configuration_changed,
+		.fini = &ni_dpm_fini,
+		.get_sclk = &ni_dpm_get_sclk,
+		.get_mclk = &ni_dpm_get_mclk,
+		.print_power_state = &ni_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &ni_dpm_force_performance_level,
+		.vblank_too_short = &ni_dpm_vblank_too_short,
+		.get_current_sclk = &ni_dpm_get_current_sclk,
+		.get_current_mclk = &ni_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
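+/* VCE ring callbacks shared by the Trinity and SI tables. */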
+static const struct radeon_asic_ring trinity_vce_ring = {
+	.ib_execute = &radeon_vce_ib_execute,
+	.emit_fence = &radeon_vce_fence_emit,
+	.emit_semaphore = &radeon_vce_semaphore_emit,
+	.cs_parse = &radeon_vce_cs_parse,
+	.ring_test = &radeon_vce_ring_test,
+	.ib_test = &radeon_vce_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &vce_v1_0_get_rptr,
+	.get_wptr = &vce_v1_0_get_wptr,
+	.set_wptr = &vce_v1_0_set_wptr,
+};
+
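+/* Trinity APUs: cayman callbacks plus VCE rings and DCE6 bandwidth handling. */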
+static struct radeon_asic trinity_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.get_allowed_info_register = cayman_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring,
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &sumo_set_uvd_clocks,
+		.set_vce_clocks = &tn_set_vce_clocks,
+		.get_temperature = &tn_get_temp,
+	},
+	.dpm = {
+		.init = &trinity_dpm_init,
+		.setup_asic = &trinity_dpm_setup_asic,
+		.enable = &trinity_dpm_enable,
+		.late_enable = &trinity_dpm_late_enable,
+		.disable = &trinity_dpm_disable,
+		.pre_set_power_state = &trinity_dpm_pre_set_power_state,
+		.set_power_state = &trinity_dpm_set_power_state,
+		.post_set_power_state = &trinity_dpm_post_set_power_state,
+		.display_configuration_changed = &trinity_dpm_display_configuration_changed,
+		.fini = &trinity_dpm_fini,
+		.get_sclk = &trinity_dpm_get_sclk,
+		.get_mclk = &trinity_dpm_get_mclk,
+		.print_power_state = &trinity_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &trinity_dpm_force_performance_level,
+		.enable_bapm = &trinity_dpm_enable_bapm,
+		.get_current_sclk = &trinity_dpm_get_current_sclk,
+		.get_current_mclk = &trinity_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
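+/* SI rings: cs_parse is NULL, commands run under GPU VM protection rather than a CS checker. */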
+static const struct radeon_asic_ring si_gfx_ring = {
+	.ib_execute = &si_ring_ib_execute,
+	.ib_parse = &si_ib_parse,
+	.emit_fence = &si_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &si_gfx_is_lockup,
+	.vm_flush = &si_vm_flush,
+	.get_rptr = &cayman_gfx_get_rptr,
+	.get_wptr = &cayman_gfx_get_wptr,
+	.set_wptr = &cayman_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring si_dma_ring = {
+	.ib_execute = &cayman_dma_ring_ib_execute,
+	.ib_parse = &evergreen_dma_ib_parse,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &si_dma_is_lockup,
+	.vm_flush = &si_dma_vm_flush,
+	.get_rptr = &cayman_dma_get_rptr,
+	.get_wptr = &cayman_dma_get_wptr,
+	.set_wptr = &cayman_dma_set_wptr,
+};
+
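+/* Southern Islands dGPUs: si-specific reset, VM and DMA copy paths; dpm gains fan control. */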
+static struct radeon_asic si_asic = {
+	.init = &si_init,
+	.fini = &si_fini,
+	.suspend = &si_suspend,
+	.resume = &si_resume,
+	.asic_reset = &si_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &si_get_xclk,
+	.get_gpu_clock_counter = &si_get_gpu_clock_counter,
+	.get_allowed_info_register = si_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &si_vm_init,
+		.fini = &si_vm_fini,
+		.copy_pages = &si_dma_vm_copy_pages,
+		.write_pages = &si_dma_vm_write_pages,
+		.set_pages = &si_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring,
+	},
+	.irq = {
+		.set = &si_irq_set,
+		.process = &si_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &si_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &si_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &si_set_uvd_clocks,
+		.set_vce_clocks = &si_set_vce_clocks,
+		.get_temperature = &si_get_temp,
+	},
+	.dpm = {
+		.init = &si_dpm_init,
+		.setup_asic = &si_dpm_setup_asic,
+		.enable = &si_dpm_enable,
+		.late_enable = &si_dpm_late_enable,
+		.disable = &si_dpm_disable,
+		.pre_set_power_state = &si_dpm_pre_set_power_state,
+		.set_power_state = &si_dpm_set_power_state,
+		.post_set_power_state = &si_dpm_post_set_power_state,
+		.display_configuration_changed = &si_dpm_display_configuration_changed,
+		.fini = &si_dpm_fini,
+		.get_sclk = &ni_dpm_get_sclk,
+		.get_mclk = &ni_dpm_get_mclk,
+		.print_power_state = &ni_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &si_dpm_force_performance_level,
+		.vblank_too_short = &ni_dpm_vblank_too_short,
+		.fan_ctrl_set_mode = &si_fan_ctrl_set_mode,
+		.fan_ctrl_get_mode = &si_fan_ctrl_get_mode,
+		.get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent,
+		.set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent,
+		.get_current_sclk = &si_dpm_get_current_sclk,
+		.get_current_mclk = &si_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
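+/* CIK rings: gfx CP, compute queues and SDMA engines each get their own table. */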
+static const struct radeon_asic_ring ci_gfx_ring = {
+	.ib_execute = &cik_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_fence_gfx_ring_emit,
+	.emit_semaphore = &cik_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_ring_test,
+	.ib_test = &cik_ib_test,
+	.is_lockup = &cik_gfx_is_lockup,
+	.vm_flush = &cik_vm_flush,
+	.get_rptr = &cik_gfx_get_rptr,
+	.get_wptr = &cik_gfx_get_wptr,
+	.set_wptr = &cik_gfx_set_wptr,
+};
+
+static const struct radeon_asic_ring ci_cp_ring = {
+	.ib_execute = &cik_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_fence_compute_ring_emit,
+	.emit_semaphore = &cik_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_ring_test,
+	.ib_test = &cik_ib_test,
+	.is_lockup = &cik_gfx_is_lockup,
+	.vm_flush = &cik_vm_flush,
+	.get_rptr = &cik_compute_get_rptr,
+	.get_wptr = &cik_compute_get_wptr,
+	.set_wptr = &cik_compute_set_wptr,
+};
+
+static const struct radeon_asic_ring ci_dma_ring = {
+	.ib_execute = &cik_sdma_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_sdma_fence_ring_emit,
+	.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_sdma_ring_test,
+	.ib_test = &cik_sdma_ib_test,
+	.is_lockup = &cik_sdma_is_lockup,
+	.vm_flush = &cik_dma_vm_flush,
+	.get_rptr = &cik_sdma_get_rptr,
+	.get_wptr = &cik_sdma_get_wptr,
+	.set_wptr = &cik_sdma_set_wptr,
+};
+
+static const struct radeon_asic_ring ci_vce_ring = {
+	.ib_execute = &radeon_vce_ib_execute,
+	.emit_fence = &radeon_vce_fence_emit,
+	.emit_semaphore = &radeon_vce_semaphore_emit,
+	.cs_parse = &radeon_vce_cs_parse,
+	.ring_test = &radeon_vce_ring_test,
+	.ib_test = &radeon_vce_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &vce_v1_0_get_rptr,
+	.get_wptr = &vce_v1_0_get_wptr,
+	.set_wptr = &vce_v1_0_set_wptr,
+};
+
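+/* CI dGPUs: CIK callbacks with SDMA copies, DCE8 bandwidth and ci dpm including fan control. */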
+static struct radeon_asic ci_asic = {
+	.init = &cik_init,
+	.fini = &cik_fini,
+	.suspend = &cik_suspend,
+	.resume = &cik_resume,
+	.asic_reset = &cik_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = &r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &cik_get_xclk,
+	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+	.get_allowed_info_register = cik_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cik_vm_init,
+		.fini = &cik_vm_fini,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
+	},
+	.irq = {
+		.set = &cik_irq_set,
+		.process = &cik_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce8_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &cik_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &cik_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.set_vce_clocks = &cik_set_vce_clocks,
+		.get_temperature = &ci_get_temp,
+	},
+	.dpm = {
+		.init = &ci_dpm_init,
+		.setup_asic = &ci_dpm_setup_asic,
+		.enable = &ci_dpm_enable,
+		.late_enable = &ci_dpm_late_enable,
+		.disable = &ci_dpm_disable,
+		.pre_set_power_state = &ci_dpm_pre_set_power_state,
+		.set_power_state = &ci_dpm_set_power_state,
+		.post_set_power_state = &ci_dpm_post_set_power_state,
+		.display_configuration_changed = &ci_dpm_display_configuration_changed,
+		.fini = &ci_dpm_fini,
+		.get_sclk = &ci_dpm_get_sclk,
+		.get_mclk = &ci_dpm_get_mclk,
+		.print_power_state = &ci_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &ci_dpm_force_performance_level,
+		.vblank_too_short = &ci_dpm_vblank_too_short,
+		.powergate_uvd = &ci_dpm_powergate_uvd,
+		.fan_ctrl_set_mode = &ci_fan_ctrl_set_mode,
+		.fan_ctrl_get_mode = &ci_fan_ctrl_get_mode,
+		.get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent,
+		.set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent,
+		.get_current_sclk = &ci_dpm_get_current_sclk,
+		.get_current_mclk = &ci_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
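+/* KV APUs: CIK callbacks with kv dpm; PCIe lane control is not available. */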
+static struct radeon_asic kv_asic = {
+	.init = &cik_init,
+	.fini = &cik_fini,
+	.suspend = &cik_suspend,
+	.resume = &cik_resume,
+	.asic_reset = &cik_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.mmio_hdp_flush = &r600_mmio_hdp_flush,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &cik_get_xclk,
+	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+	.get_allowed_info_register = cik_get_allowed_info_register,
+	.gart = {
+		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cik_vm_init,
+		.fini = &cik_vm_fini,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
+	},
+	.irq = {
+		.set = &cik_irq_set,
+		.process = &cik_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce8_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &cik_copy_cpdma,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &cik_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.set_vce_clocks = &cik_set_vce_clocks,
+		.get_temperature = &kv_get_temp,
+	},
+	.dpm = {
+		.init = &kv_dpm_init,
+		.setup_asic = &kv_dpm_setup_asic,
+		.enable = &kv_dpm_enable,
+		.late_enable = &kv_dpm_late_enable,
+		.disable = &kv_dpm_disable,
+		.pre_set_power_state = &kv_dpm_pre_set_power_state,
+		.set_power_state = &kv_dpm_set_power_state,
+		.post_set_power_state = &kv_dpm_post_set_power_state,
+		.display_configuration_changed = &kv_dpm_display_configuration_changed,
+		.fini = &kv_dpm_fini,
+		.get_sclk = &kv_dpm_get_sclk,
+		.get_mclk = &kv_dpm_get_mclk,
+		.print_power_state = &kv_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &kv_dpm_force_performance_level,
+		.powergate_uvd = &kv_dpm_powergate_uvd,
+		.enable_bapm = &kv_dpm_enable_bapm,
+		.get_current_sclk = &kv_dpm_get_current_sclk,
+		.get_current_mclk = &kv_dpm_get_current_mclk,
+	},
+	.pflip = {
+		.page_flip = &evergreen_page_flip,
+		.page_flip_pending = &evergreen_page_flip_pending,
+	},
+};
+
+/**
+ * radeon_asic_init - register asic-specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate asic-specific callbacks for each
+ * chip family.  Also sets other asic-specific info such as the number
+ * of crtcs and the register aperture accessors (all asics).
+ * Returns 0 on success, -EINVAL for unsupported chip families.
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+	radeon_register_accessor_init(rdev);
+
+	/* set the number of crtcs */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		rdev->num_crtc = 1;
+	else
+		rdev->num_crtc = 2;
+
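+	/* assume no UVD/VCE; the per-family cases below re-enable them */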
+	rdev->has_uvd = false;
+	rdev->has_vce = false;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+		rdev->asic = &r100_asic;
+		break;
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+		rdev->asic = &r200_asic;
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+		if (rdev->flags & RADEON_IS_PCIE)
+			rdev->asic = &r300_asic_pcie;
+		else
+			rdev->asic = &r300_asic;
+		break;
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		rdev->asic = &r420_asic;
+		/* handle Macs: boards without a BIOS image need the legacy clock callbacks */
+		if (rdev->bios == NULL) {
+			rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
+			rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
+			rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
+			rdev->asic->pm.set_memory_clock = NULL;
+			rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
+		}
+		break;
+	case CHIP_RS400:
+	case CHIP_RS480:
+		rdev->asic = &rs400_asic;
+		break;
+	case CHIP_RS600:
+		rdev->asic = &rs600_asic;
+		break;
+	case CHIP_RS690:
+	case CHIP_RS740:
+		rdev->asic = &rs690_asic;
+		break;
+	case CHIP_RV515:
+		rdev->asic = &rv515_asic;
+		break;
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		rdev->asic = &r520_asic;
+		break;
+	case CHIP_R600:
+		rdev->asic = &r600_asic;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RV670:
+		rdev->asic = &rv6xx_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->asic = &rs780_asic;
+		/* 760G/780V/880V don't have UVD */
+		if ((rdev->pdev->device == 0x9616) ||
+		    (rdev->pdev->device == 0x9611) ||
+		    (rdev->pdev->device == 0x9613) ||
+		    (rdev->pdev->device == 0x9711) ||
+		    (rdev->pdev->device == 0x9713))
+			rdev->has_uvd = false;
+		else
+			rdev->has_uvd = true;
+		break;
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		rdev->asic = &rv770_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CEDAR)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &evergreen_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		rdev->asic = &sumo_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CAICOS)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &btc_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_CAYMAN:
+		rdev->asic = &cayman_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 6;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_ARUBA:
+		rdev->asic = &trinity_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 4;
+		rdev->has_uvd = true;
+		rdev->has_vce = true;
+		rdev->cg_flags =
+			RADEON_CG_SUPPORT_VCE_MGCG;
+		break;
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		rdev->asic = &si_asic;
+		/* set num crtcs */
+		if (rdev->family == CHIP_HAINAN)
+			rdev->num_crtc = 0;
+		else if (rdev->family == CHIP_OLAND)
+			rdev->num_crtc = 2;
+		else
+			rdev->num_crtc = 6;
+		if (rdev->family == CHIP_HAINAN) {
+			rdev->has_uvd = false;
+			rdev->has_vce = false;
+		} else {
+			rdev->has_uvd = true;
+			rdev->has_vce = true;
+		}
+		switch (rdev->family) {
+		case CHIP_TAHITI:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_PITCAIRN:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_VERDE:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0 |
+				/*RADEON_PG_SUPPORT_GFX_PG | */
+				RADEON_PG_SUPPORT_SDMA;
+			break;
+		case CHIP_OLAND:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_HAINAN:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		default:
+			rdev->cg_flags = 0;
+			rdev->pg_flags = 0;
+			break;
+		}
+		break;
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		rdev->asic = &ci_asic;
+		rdev->num_crtc = 6;
+		rdev->has_uvd = true;
+		rdev->has_vce = true;
+		if (rdev->family == CHIP_BONAIRE) {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		} else {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		}
+		break;
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		rdev->asic = &kv_asic;
+		/* set num crtcs */
+		if (rdev->family == CHIP_KAVERI) {
+			rdev->num_crtc = 4;
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+				/*RADEON_PG_SUPPORT_GFX_PG |
+				RADEON_PG_SUPPORT_GFX_SMG |
+				RADEON_PG_SUPPORT_GFX_DMG |
+				RADEON_PG_SUPPORT_UVD |
+				RADEON_PG_SUPPORT_VCE |
+				RADEON_PG_SUPPORT_CP |
+				RADEON_PG_SUPPORT_GDS |
+				RADEON_PG_SUPPORT_RLC_SMU_HS |
+				RADEON_PG_SUPPORT_ACP |
+				RADEON_PG_SUPPORT_SAMU;*/
+		} else {
+			rdev->num_crtc = 2;
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+				/*RADEON_PG_SUPPORT_GFX_PG |
+				RADEON_PG_SUPPORT_GFX_SMG |
+				RADEON_PG_SUPPORT_UVD |
+				RADEON_PG_SUPPORT_VCE |
+				RADEON_PG_SUPPORT_CP |
+				RADEON_PG_SUPPORT_GDS |
+				RADEON_PG_SUPPORT_RLC_SMU_HS |
+				RADEON_PG_SUPPORT_SAMU;*/
+		}
+		rdev->has_uvd = true;
+		rdev->has_vce = true;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
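+	/*
+	 * IGPs use system memory, so there is no dedicated memory
+	 * clock to query or program.
+	 */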
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->asic->pm.get_memory_clock = NULL;
+		rdev->asic->pm.set_memory_clock = NULL;
+	}
+
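+	/* honor the global radeon_uvd/radeon_vce toggles */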
+	if (!radeon_uvd)
+		rdev->has_uvd = false;
+	if (!radeon_vce)
+		rdev->has_vce = false;
+
+	return 0;
+}
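+
+/*
+ * A minimal sketch of how the table installed above is consumed
+ * (assuming the wrapper-macro convention used by radeon.h): callers
+ * never dereference rdev->asic directly, they go through macros
+ * roughly of this shape:
+ *
+ *	#define radeon_init(rdev)    (rdev)->asic->init((rdev))
+ *	#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+ *	#define radeon_resume(rdev)  (rdev)->asic->resume((rdev))
+ *
+ * Supporting a new chip family therefore amounts to filling in another
+ * struct radeon_asic instance and adding a case to the switch above.
+ */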
+
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 0000000..e3f036c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,986 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+/*
+ * r100,rv100,rs100,rv200,rs200
+ */
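+/*
+ * VGA/CRTC/cursor state saved by r100_mc_stop() and restored by
+ * r100_mc_resume() while the memory controller is reprogrammed.
+ */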
+struct r100_mc_save {
+	u32	GENMO_WT;
+	u32	CRTC_EXT_CNTL;
+	u32	CRTC_GEN_CNTL;
+	u32	CRTC2_GEN_CNTL;
+	u32	CUR_OFFSET;
+	u32	CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
+void r100_vga_set_state(struct radeon_device *rdev, bool state);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r100_asic_reset(struct radeon_device *rdev, bool hard);
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+			    uint64_t entry);
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+int r100_irq_set(struct radeon_device *rdev);
+int r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+				    uint64_t src_offset,
+				    uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv);
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_bandwidth_update(struct radeon_device *rdev);
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_page_flip(struct radeon_device *rdev, int crtc,
+			   u64 crtc_base, bool async);
+extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
+extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+				   uint64_t src_offset,
+				   uint64_t dst_offset,
+				   unsigned num_gpu_pages,
+				   struct reservation_object *resv);
+void r200_set_safe_registers(struct radeon_device *rdev);
+
+/*
+ * r300,r350,rv350,rv380
+ */
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
+extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+				struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
+extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+				     uint64_t entry);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/*
+ * r420,r423,rv410
+ */
+extern int r420_init(struct radeon_device *rdev);
+extern void r420_fini(struct radeon_device *rdev);
+extern int r420_suspend(struct radeon_device *rdev);
+extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
+
+/*
+ * rs400,rs480
+ */
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs600
+ */
+extern int rs600_asic_reset(struct radeon_device *rdev, bool hard);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
+			    u64 crtc_base, bool async);
+extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs690,rs740
+ */
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+					struct drm_display_mode *mode1,
+					struct drm_display_mode *mode2);
+extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rv515
+ */
+struct rv515_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[2];
+};
+
+int rv515_init(struct radeon_device *rdev);
+void rv515_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void rv515_bandwidth_update(struct radeon_device *rdev);
+int rv515_resume(struct radeon_device *rdev);
+int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+int r520_init(struct radeon_device *rdev);
+int r520_resume(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
+ */
+int r600_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+int r600_suspend(struct radeon_device *rdev);
+int r600_resume(struct radeon_device *rdev);
+void r600_vga_set_state(struct radeon_device *rdev, bool state);
+int r600_wb_init(struct radeon_device *rdev);
+void r600_wb_fini(struct radeon_device *rdev);
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_asic_reset(struct radeon_device *rdev, bool hard);
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+				     uint64_t src_offset, uint64_t dst_offset,
+				     unsigned num_gpu_pages,
+				     struct reservation_object *resv);
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+				   uint64_t src_offset, uint64_t dst_offset,
+				   unsigned num_gpu_pages,
+				   struct reservation_object *resv);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+				   u32 reg, u32 *val);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
+void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
+				    size_t size);
+void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
+void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+int r600_dpm_late_enable(struct radeon_device *rdev);
+/* r600 dma */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void r600_dma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+/* rv6xx dpm */
+int rv6xx_dpm_init(struct radeon_device *rdev);
+int rv6xx_dpm_enable(struct radeon_device *rdev);
+void rv6xx_dpm_disable(struct radeon_device *rdev);
+int rv6xx_dpm_set_power_state(struct radeon_device *rdev);
+void rv6xx_setup_asic(struct radeon_device *rdev);
+void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rv6xx_dpm_fini(struct radeon_device *rdev);
+u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
+void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev);
+/* rs780 dpm */
+int rs780_dpm_init(struct radeon_device *rdev);
+int rs780_dpm_enable(struct radeon_device *rdev);
+void rs780_dpm_disable(struct radeon_device *rdev);
+int rs780_dpm_set_power_state(struct radeon_device *rdev);
+void rs780_dpm_setup_asic(struct radeon_device *rdev);
+void rs780_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rs780_dpm_fini(struct radeon_device *rdev);
+u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rs780_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
+void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m);
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev);
+
+/*
+ * rv770,rv730,rv710,rv740
+ */
+int rv770_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+int rv770_suspend(struct radeon_device *rdev);
+int rv770_resume(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev);
+void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base,
+		     bool async);
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv);
+u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int rv770_get_temp(struct radeon_device *rdev);
+/* rv7xx pm */
+int rv770_dpm_init(struct radeon_device *rdev);
+int rv770_dpm_enable(struct radeon_device *rdev);
+int rv770_dpm_late_enable(struct radeon_device *rdev);
+void rv770_dpm_disable(struct radeon_device *rdev);
+int rv770_dpm_set_power_state(struct radeon_device *rdev);
+void rv770_dpm_setup_asic(struct radeon_device *rdev);
+void rv770_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rv770_dpm_fini(struct radeon_device *rdev);
+u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rv770_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
+void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m);
+int rv770_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
+bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev);
+
+/*
+ * evergreen
+ */
+struct evergreen_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[RADEON_MAX_CRTCS];
+};
+
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
+				u64 crtc_base, bool async);
+extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
+extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib);
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+					uint64_t src_offset, uint64_t dst_offset,
+					unsigned num_gpu_pages,
+					struct reservation_object *resv);
+int evergreen_get_temp(struct radeon_device *rdev);
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+					u32 reg, u32 *val);
+int sumo_get_temp(struct radeon_device *rdev);
+int tn_get_temp(struct radeon_device *rdev);
+int cypress_dpm_init(struct radeon_device *rdev);
+void cypress_dpm_setup_asic(struct radeon_device *rdev);
+int cypress_dpm_enable(struct radeon_device *rdev);
+void cypress_dpm_disable(struct radeon_device *rdev);
+int cypress_dpm_set_power_state(struct radeon_device *rdev);
+void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
+void cypress_dpm_fini(struct radeon_device *rdev);
+bool cypress_dpm_vblank_too_short(struct radeon_device *rdev);
+int btc_dpm_init(struct radeon_device *rdev);
+void btc_dpm_setup_asic(struct radeon_device *rdev);
+int btc_dpm_enable(struct radeon_device *rdev);
+void btc_dpm_disable(struct radeon_device *rdev);
+int btc_dpm_pre_set_power_state(struct radeon_device *rdev);
+int btc_dpm_set_power_state(struct radeon_device *rdev);
+void btc_dpm_post_set_power_state(struct radeon_device *rdev);
+void btc_dpm_fini(struct radeon_device *rdev);
+u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
+bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						     struct seq_file *m);
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev);
+int sumo_dpm_init(struct radeon_device *rdev);
+int sumo_dpm_enable(struct radeon_device *rdev);
+int sumo_dpm_late_enable(struct radeon_device *rdev);
+void sumo_dpm_disable(struct radeon_device *rdev);
+int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
+int sumo_dpm_set_power_state(struct radeon_device *rdev);
+void sumo_dpm_post_set_power_state(struct radeon_device *rdev);
+void sumo_dpm_setup_asic(struct radeon_device *rdev);
+void sumo_dpm_display_configuration_changed(struct radeon_device *rdev);
+void sumo_dpm_fini(struct radeon_device *rdev);
+u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void sumo_dpm_print_power_state(struct radeon_device *rdev,
+				struct radeon_ps *ps);
+void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						      struct seq_file *m);
+int sumo_dpm_force_performance_level(struct radeon_device *rdev,
+				     enum radeon_dpm_forced_level level);
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev);
+
+/*
+ * cayman
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence);
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev, bool hard);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		     unsigned vm_id, uint64_t pd_addr);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+			 unsigned vm_id, uint64_t pd_addr);
+
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+			struct radeon_ring *ring);
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+			struct radeon_ring *ring);
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring);
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring);
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+				     u32 reg, u32 *val);
+
+int ni_dpm_init(struct radeon_device *rdev);
+void ni_dpm_setup_asic(struct radeon_device *rdev);
+int ni_dpm_enable(struct radeon_device *rdev);
+void ni_dpm_disable(struct radeon_device *rdev);
+int ni_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ni_dpm_set_power_state(struct radeon_device *rdev);
+void ni_dpm_post_set_power_state(struct radeon_device *rdev);
+void ni_dpm_fini(struct radeon_device *rdev);
+u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ni_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int ni_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev);
+int trinity_dpm_init(struct radeon_device *rdev);
+int trinity_dpm_enable(struct radeon_device *rdev);
+int trinity_dpm_late_enable(struct radeon_device *rdev);
+void trinity_dpm_disable(struct radeon_device *rdev);
+int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
+int trinity_dpm_set_power_state(struct radeon_device *rdev);
+void trinity_dpm_post_set_power_state(struct radeon_device *rdev);
+void trinity_dpm_setup_asic(struct radeon_device *rdev);
+void trinity_dpm_display_configuration_changed(struct radeon_device *rdev);
+void trinity_dpm_fini(struct radeon_device *rdev);
+u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void trinity_dpm_print_power_state(struct radeon_device *rdev,
+				   struct radeon_ps *ps);
+void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+							 struct seq_file *m);
+int trinity_dpm_force_performance_level(struct radeon_device *rdev,
+					enum radeon_dpm_forced_level level);
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev);
+int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
+
+/* DCE6 - SI */
+void dce6_bandwidth_update(struct radeon_device *rdev);
+void dce6_audio_fini(struct radeon_device *rdev);
+
+/*
+ * si
+ */
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence);
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int si_init(struct radeon_device *rdev);
+void si_fini(struct radeon_device *rdev);
+int si_suspend(struct radeon_device *rdev);
+int si_resume(struct radeon_device *rdev);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int si_asic_reset(struct radeon_device *rdev, bool hard);
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_irq_set(struct radeon_device *rdev);
+int si_irq_process(struct radeon_device *rdev);
+int si_vm_init(struct radeon_device *rdev);
+void si_vm_fini(struct radeon_device *rdev);
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		 unsigned vm_id, uint64_t pd_addr);
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+				 uint64_t src_offset, uint64_t dst_offset,
+				 unsigned num_gpu_pages,
+				 struct reservation_object *resv);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags);
+
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		     unsigned vm_id, uint64_t pd_addr);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
+int si_get_temp(struct radeon_device *rdev);
+int si_get_allowed_info_register(struct radeon_device *rdev,
+				 u32 reg, u32 *val);
+int si_dpm_init(struct radeon_device *rdev);
+void si_dpm_setup_asic(struct radeon_device *rdev);
+int si_dpm_enable(struct radeon_device *rdev);
+int si_dpm_late_enable(struct radeon_device *rdev);
+void si_dpm_disable(struct radeon_device *rdev);
+int si_dpm_pre_set_power_state(struct radeon_device *rdev);
+int si_dpm_set_power_state(struct radeon_device *rdev);
+void si_dpm_post_set_power_state(struct radeon_device *rdev);
+void si_dpm_fini(struct radeon_device *rdev);
+void si_dpm_display_configuration_changed(struct radeon_device *rdev);
+void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int si_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+				      u32 *speed);
+int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+				      u32 speed);
+u32 si_fan_ctrl_get_mode(struct radeon_device *rdev);
+void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev);
+
+/* DCE8 - CIK */
+void dce8_bandwidth_update(struct radeon_device *rdev);
+
+/*
+ * cik
+ */
+uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev);
+u32 cik_get_xclk(struct radeon_device *rdev);
+uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+				  uint64_t src_offset, uint64_t dst_offset,
+				  unsigned num_gpu_pages,
+				  struct reservation_object *resv);
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv);
+int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+			     struct radeon_fence *fence);
+void cik_fence_compute_ring_emit(struct radeon_device *rdev,
+				 struct radeon_fence *fence);
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
+			     struct radeon_ring *cp,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+void cik_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cik_init(struct radeon_device *rdev);
+void cik_fini(struct radeon_device *rdev);
+int cik_suspend(struct radeon_device *rdev);
+int cik_resume(struct radeon_device *rdev);
+bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int cik_asic_reset(struct radeon_device *rdev, bool hard);
+void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_irq_set(struct radeon_device *rdev);
+int cik_irq_process(struct radeon_device *rdev);
+int cik_vm_init(struct radeon_device *rdev);
+void cik_vm_fini(struct radeon_device *rdev);
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		  unsigned vm_id, uint64_t pd_addr);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		      unsigned vm_id, uint64_t pd_addr);
+int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring);
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring);
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+void cik_compute_set_wptr(struct radeon_device *rdev,
+			  struct radeon_ring *ring);
+u32 cik_sdma_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 cik_sdma_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+int ci_get_temp(struct radeon_device *rdev);
+int kv_get_temp(struct radeon_device *rdev);
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+				  u32 reg, u32 *val);
+
+int ci_dpm_init(struct radeon_device *rdev);
+int ci_dpm_enable(struct radeon_device *rdev);
+int ci_dpm_late_enable(struct radeon_device *rdev);
+void ci_dpm_disable(struct radeon_device *rdev);
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ci_dpm_set_power_state(struct radeon_device *rdev);
+void ci_dpm_post_set_power_state(struct radeon_device *rdev);
+void ci_dpm_setup_asic(struct radeon_device *rdev);
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
+void ci_dpm_fini(struct radeon_device *rdev);
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev);
+
+int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+				      u32 *speed);
+int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+				      u32 speed);
+u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev);
+void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
+
+int kv_dpm_init(struct radeon_device *rdev);
+int kv_dpm_enable(struct radeon_device *rdev);
+int kv_dpm_late_enable(struct radeon_device *rdev);
+void kv_dpm_disable(struct radeon_device *rdev);
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
+int kv_dpm_set_power_state(struct radeon_device *rdev);
+void kv_dpm_post_set_power_state(struct radeon_device *rdev);
+void kv_dpm_setup_asic(struct radeon_device *rdev);
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
+void kv_dpm_fini(struct radeon_device *rdev);
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev);
+
+/* uvd v1.0 */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+int uvd_v1_0_resume(struct radeon_device *rdev);
+
+int uvd_v1_0_init(struct radeon_device *rdev);
+void uvd_v1_0_fini(struct radeon_device *rdev);
+int uvd_v1_0_start(struct radeon_device *rdev);
+void uvd_v1_0_stop(struct radeon_device *rdev);
+
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence);
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/* uvd v2.2 */
+int uvd_v2_2_resume(struct radeon_device *rdev);
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence);
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+
+/* uvd v3.1 */
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+
+/* uvd v4.2 */
+int uvd_v4_2_resume(struct radeon_device *rdev);
+
+/* vce v1.0 */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data);
+unsigned vce_v1_0_bo_size(struct radeon_device *rdev);
+int vce_v1_0_resume(struct radeon_device *rdev);
+int vce_v1_0_init(struct radeon_device *rdev);
+int vce_v1_0_start(struct radeon_device *rdev);
+
+/* vce v2.0 */
+unsigned vce_v2_0_bo_size(struct radeon_device *rdev);
+int vce_v2_0_resume(struct radeon_device *rdev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 0000000..f422a8d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,4482 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "radeon_asic.h"
+
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			uint32_t supported_device, u16 caps);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+
+union atom_supported_devices {
+	struct _ATOM_SUPPORTED_DEVICES_INFO info;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+					  ATOM_GPIO_I2C_ASSIGMENT *gpio,
+					  u8 index)
+{
+	/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+	if ((rdev->family == CHIP_R420) ||
+	    (rdev->family == CHIP_R423) ||
+	    (rdev->family == CHIP_RV410)) {
+		if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+			gpio->ucClkMaskShift = 0x19;
+			gpio->ucDataMaskShift = 0x18;
+		}
+	}
+
+	/* some evergreen boards have bad data for this entry */
+	if (ASIC_IS_DCE4(rdev)) {
+		if ((index == 7) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+		    (gpio->sucI2cId.ucAccess == 0)) {
+			gpio->sucI2cId.ucAccess = 0x97;
+			gpio->ucDataMaskShift = 8;
+			gpio->ucDataEnShift = 8;
+			gpio->ucDataY_Shift = 8;
+			gpio->ucDataA_Shift = 8;
+		}
+	}
+
+	/* some DCE3 boards have bad data for this entry */
+	if (ASIC_IS_DCE3(rdev)) {
+		if ((index == 4) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+		    (gpio->sucI2cId.ucAccess == 0x94))
+			gpio->sucI2cId.ucAccess = 0x14;
+	}
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+	struct radeon_i2c_bus_rec i2c;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
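+	/* the table stores register locations as DWORD indices; convert
+	 * them to the byte offsets the register accessors expect
+	 */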
+	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+		i2c.hw_capable = true;
+	else
+		i2c.hw_capable = false;
+
+	if (gpio->sucI2cId.ucAccess == 0xa0)
+		i2c.mm_i2c = true;
+	else
+		i2c.mm_i2c = false;
+
+	i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+	if (i2c.mask_clk_reg)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+	i2c.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
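+		/* the ATOM headers declare asGPIO_Info as a one-element
+		 * array, so step through the entries by sizeof() explicitly
+		 */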
+		gpio = &i2c_info->asGPIO_Info[0];
+		for (i = 0; i < num_indices; i++) {
+
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+				break;
+			}
+			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+		}
+	}
+
+	return i2c;
+}
+
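+/* Register an i2c adapter for every valid bus described in the
+ * GPIO_I2C_Info table; each adapter is named after its i2c id.
+ */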
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+	char stmp[32];
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		gpio = &i2c_info->asGPIO_Info[0];
+		for (i = 0; i < num_indices; i++) {
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+
+			if (i2c.valid) {
+				sprintf(stmp, "0x%x", i2c.i2c_id);
+				rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+			}
+			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+		}
+	}
+}
+
+struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+						   u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+		pin = gpio_info->asGPIO_Pin;
+		for (i = 0; i < num_indices; i++) {
+			if (id == pin->ucGPIO_ID) {
+				gpio.id = pin->ucGPIO_ID;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+				gpio.shift = pin->ucGpioPinBitShift;
+				gpio.mask = (1 << pin->ucGpioPinBitShift);
+				gpio.valid = true;
+				break;
+			}
+			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
+				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	u32 reg;
+
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
+	if (ASIC_IS_DCE6(rdev))
+		reg = SI_DC_GPIO_HPD_A;
+	else if (ASIC_IS_DCE4(rdev))
+		reg = EVERGREEN_DC_GPIO_HPD_A;
+	else
+		reg = AVIVO_DC_GPIO_HPD_A;
+
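+	/* HPD pins 1-6 map to fixed bits in the DC_GPIO_HPD_A register */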
+	hpd.gpio = *gpio;
+	if (gpio->reg == reg) {
+		switch (gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+				     uint32_t supported_device,
+				     int *connector_type,
+				     struct radeon_i2c_bus_rec *i2c_bus,
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
+{
+
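+	/* Returning false drops the connector entirely; otherwise the
+	 * connector type, ddc line, line mux and hpd info are fixed up
+	 * in place.
+	 */
+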
+	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x791e) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x826d)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x1849) &&
+	    (dev->pdev->subsystem_device == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
+	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x147b) &&
+	    (dev->pdev->subsystem_device == 0x2412)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+			return false;
+	}
+
+	/* Falcon NW laptop lists vga ddc line for LVDS */
+	if ((dev->pdev->device == 0x5653) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x0291)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			i2c_bus->valid = false;
+			*line_mux = 53;
+		}
+	}
+
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Funky macbooks */
+	if ((dev->pdev->device == 0x71C5) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x0080)) {
+		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+			return false;
+		if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+			*line_mux = 0x90;
+	}
+
+	/* mac rv630, rv730, others */
+	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+	}
+
+	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x9598) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01da)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x9598) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01e4)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3450 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x95C5) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01e2)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* some BIOSes seem to report DAC on HDMI - usually this is a board with
+	 * HDMI + VGA reporting as HDMI
+	 */
+	if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+			*connector_type = DRM_MODE_CONNECTOR_VGA;
+			*line_mux = 0;
+		}
+	}
+
+	/* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
+	 * on the laptop and a DVI port on the docking station and
+	 * both share the same encoder, hpd pin, and ddc line.
+	 * So while the bios table is technically correct,
+	 * we drop the DVI port here since xrandr has no concept of
+	 * encoders and will try to drive both connectors
+	 * with different crtcs which isn't possible on the hardware
+	 * side and leaves no crtcs for LVDS or VGA.
+	 */
+	if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+			/* actually it's a DVI-D port not DVI-I */
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+			return false;
+		}
+	}
+
+	/* XFX Pine Group device rv730 reports no VGA DDC lines
+	 * even though they are wired up to record 0x93
+	 */
+	if ((dev->pdev->device == 0x9498) &&
+	    (dev->pdev->subsystem_vendor == 0x1682) &&
+	    (dev->pdev->subsystem_device == 0x2452) &&
+	    (i2c_bus->valid == false) &&
+	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+		struct radeon_device *rdev = dev->dev_private;
+		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+	}
+
+	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+	if (((dev->pdev->device == 0x9802) ||
+	     (dev->pdev->device == 0x9805) ||
+	     (dev->pdev->device == 0x9806)) &&
+	    (dev->pdev->subsystem_vendor == 0x1734) &&
+	    (dev->pdev->subsystem_device == 0x11bd)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+			*line_mux = 0x3103;
+		} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	return true;
+}
+
+static const int supported_devices_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVIA,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_DisplayPort
+};
+
+static const uint16_t supported_devices_connector_object_id_convert[] = {
+	CONNECTOR_OBJECT_ID_NONE,
+	CONNECTOR_OBJECT_ID_VGA,
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+	CONNECTOR_OBJECT_ID_COMPOSITE,
+	CONNECTOR_OBJECT_ID_SVIDEO,
+	CONNECTOR_OBJECT_ID_LVDS,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_DISPLAYPORT,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+	CONNECTOR_OBJECT_ID_SVIDEO
+};
+
+static const int object_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DisplayPort,
+	DRM_MODE_CONNECTOR_eDP,
+	DRM_MODE_CONNECTOR_Unknown
+};
+
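+/* Build connectors from the v2+ object header tables: walk each display
+ * path, register its encoders and any ddc/clock routers, then add the
+ * connector with its ddc and hpd info.
+ */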
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
+	ATOM_OBJECT_TABLE *router_obj;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+	int i, j, k, path_size, device_support;
+	int connector_type;
+	u16 igp_lane_info, conn_id, connector_object_id;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_router router;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
+	router_obj = (ATOM_OBJECT_TABLE *)
+		(ctx->bios + data_offset +
+		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
+	device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
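+	/* display paths are variable length, so walk the table using each
+	 * entry's usSize rather than indexing a fixed-size array
+	 */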
+	path_size = 0;
+	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+		ATOM_DISPLAY_OBJECT_PATH *path;
+		addr += path_size;
+		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+		path_size += le16_to_cpu(path->usSize);
+
+		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+			uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+			con_obj_id =
+			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+			    >> OBJECT_ID_SHIFT;
+			con_obj_num =
+			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+			    >> ENUM_ID_SHIFT;
+			con_obj_type =
+			    (le16_to_cpu(path->usConnObjectId) &
+			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+			/* TODO CV support */
+			if (le16_to_cpu(path->usDeviceTag) ==
+				ATOM_DEVICE_CV_SUPPORT)
+				continue;
+
+			/* IGP chips */
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (con_obj_id ==
+			     CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+				uint16_t igp_offset = 0;
+				ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+				index = GetIndexIntoMasterTable(DATA,
+								IntegratedSystemInfo);
+
+				if (atom_parse_data_header(ctx, index, &size, &frev,
+							   &crev, &igp_offset)) {
+					if (crev >= 2) {
+						igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *)
+							(ctx->bios + igp_offset);
+
+						if (igp_obj) {
+							uint32_t slot_config, ct;
+
+							if (con_obj_num == 1)
+								slot_config = igp_obj->ulDDISlot1Config;
+							else
+								slot_config = igp_obj->ulDDISlot2Config;
+
+							ct = (slot_config >> 16) & 0xff;
+							connector_type = object_connector_convert[ct];
+							connector_object_id = ct;
+							igp_lane_info = slot_config & 0xffff;
+						} else
+							continue;
+					} else
+						continue;
+				} else {
+					igp_lane_info = 0;
+					connector_type =
+						object_connector_convert[con_obj_id];
+					connector_object_id = con_obj_id;
+				}
+			} else {
+				igp_lane_info = 0;
+				connector_type =
+				    object_connector_convert[con_obj_id];
+				connector_object_id = con_obj_id;
+			}
+
+			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+				continue;
+
+			router.ddc_valid = false;
+			router.cd_valid = false;
+			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+
+				grph_obj_id =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+				grph_obj_num =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+				grph_obj_type =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
+
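+							/* record lists end at a zero-size or out-of-range record */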
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record = (ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							radeon_add_atom_encoder(dev, encoder_obj,
+										le16_to_cpu(path->usDeviceTag),
+										caps);
+						}
+					}
+				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+							ATOM_I2C_RECORD *i2c_record;
+							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
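+							/* the src/dst table is a count byte followed by
+							 * u16 object ids, sources then destinations
+							 */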
+							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+							u8 *num_dst_objs = (u8 *)
+								((u8 *)router_src_dst_table + 1 +
+								 (router_src_dst_table->ucNumberOfSrc * 2));
+							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+							int enum_id;
+
+							router.router_id = router_obj_id;
+							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+								if (le16_to_cpu(path->usConnObjectId) ==
+								    le16_to_cpu(dst_objs[enum_id]))
+									break;
+							}
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_I2C_RECORD_TYPE:
+									i2c_record = (ATOM_I2C_RECORD *)record;
+									i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *)
+										&i2c_record->sucI2cId;
+									router.i2c_info =
+										radeon_lookup_i2c_gpio(rdev,
+												       i2c_config->ucAccess);
+									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+									break;
+								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+										record;
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+						}
+					}
+				}
+			}
+
+			/* look up gpio for ddc, hpd */
+			ddc_bus.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			if ((le16_to_cpu(path->usDeviceTag) &
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+					if (le16_to_cpu(path->usConnObjectId) ==
+					    le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
+						ATOM_COMMON_RECORD_HEADER *record =
+							(ATOM_COMMON_RECORD_HEADER *)
+							(ctx->bios + data_offset +
+							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
+						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+
+						while (record->ucRecordSize > 0 &&
+						       record->ucRecordType > 0 &&
+						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+							switch (record->ucRecordType) {
+							case ATOM_I2C_RECORD_TYPE:
+								i2c_record = (ATOM_I2C_RECORD *)record;
+								i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record = (ATOM_HPD_INT_RECORD *)record;
+								gpio = radeon_atombios_lookup_gpio(rdev,
+												   hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
+								break;
+							}
+							record = (ATOM_COMMON_RECORD_HEADER *)
+								((char *)record + record->ucRecordSize);
+						}
+						break;
+					}
+				}
+			}
+
+			/* needed for aux chan transactions */
+			ddc_bus.hpd = hpd.hpd;
+
+			conn_id = le16_to_cpu(path->usConnObjectId);
+
+			if (!radeon_atom_apply_quirks
+			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
+			     &ddc_bus, &conn_id, &hpd))
+				continue;
+
+			radeon_add_atom_connector(dev,
+						  conn_id,
+						  le16_to_cpu(path->usDeviceTag),
+						  connector_type, &ddc_bus,
+						  igp_lane_info,
+						  connector_object_id,
+						  &hpd,
+						  &router);
+
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	radeon_setup_mst_connector(dev);
+	return true;
+}
+
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+						 int connector_type,
+						 uint16_t devices)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	} else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+		    (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+		   (devices & ATOM_DEVICE_DFP2_SUPPORT))  {
+		struct radeon_mode_info *mode_info = &rdev->mode_info;
+		struct atom_context *ctx = mode_info->atom_context;
+		int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+		uint16_t size, data_offset;
+		uint8_t frev, crev;
+		ATOM_XTMDS_INFO *xtmds;
+
+		if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+			xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+
+			if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			} else {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+			}
+		} else
+			return supported_devices_connector_object_id_convert
+				[connector_type];
+	} else {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	}
+}
+
+struct bios_connector {
+	bool valid;
+	uint16_t line_mux;
+	uint16_t devices;
+	int connector_type;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
+};
+
+bool
+radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+	uint16_t size, data_offset;
+	uint8_t frev, crev;
+	uint16_t device_support;
+	uint8_t dac;
+	union atom_supported_devices *supported_devices;
+	int i, j, max_device;
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+	struct radeon_router router;
+
+	router.ddc_valid = false;
+	router.cd_valid = false;
+
+	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+	if (!bios_connectors)
+		return false;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		kfree(bios_connectors);
+		return false;
+	}
+
+	supported_devices =
+	    (union atom_supported_devices *)(ctx->bios + data_offset);
+
+	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
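+	/* each set bit in usDeviceSupport corresponds to one
+	 * ATOM_DEVICE_*_INDEX output
+	 */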
+	for (i = 0; i < max_device; i++) {
+		ATOM_CONNECTOR_INFO_I2C ci =
+		    supported_devices->info.asConnInfo[i];
+
+		bios_connectors[i].valid = false;
+
+		if (!(device_support & (1 << i))) {
+			continue;
+		}
+
+		if (i == ATOM_DEVICE_CV_INDEX) {
+			DRM_DEBUG_KMS("Skipping Component Video\n");
+			continue;
+		}
+
+		bios_connectors[i].connector_type =
+		    supported_devices_connector_convert[ci.sucConnectorInfo.sbfAccess.bfConnectorType];
+
+		if (bios_connectors[i].connector_type ==
+		    DRM_MODE_CONNECTOR_Unknown)
+			continue;
+
+		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+		bios_connectors[i].line_mux = ci.sucI2cId.ucAccess;
+
+		/* give tv unique connector ids */
+		if (i == ATOM_DEVICE_TV1_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 50;
+		} else if (i == ATOM_DEVICE_TV2_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 51;
+		} else if (i == ATOM_DEVICE_CV_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 52;
+		} else
+			bios_connectors[i].ddc_bus =
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
+
+		/* Always set the connector type to VGA for CRT1/CRT2. If they are
+		 * shared with a DVI port, we'll pick up the DVI connector when we
+		 * merge the outputs.  Some BIOSes incorrectly list VGA ports as DVI.
+		 */
+		if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+			bios_connectors[i].connector_type =
+			    DRM_MODE_CONNECTOR_VGA;
+
+		if (!radeon_atom_apply_quirks
+		    (dev, (1 << i), &bios_connectors[i].connector_type,
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
+			continue;
+
+		bios_connectors[i].valid = true;
+		bios_connectors[i].devices = (1 << i);
+
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+			radeon_add_atom_encoder(dev,
+						radeon_get_encoder_enum(dev,
+								      (1 << i),
+								      dac),
+						(1 << i),
+						0);
+		else
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									(1 << i),
+									dac),
+						  (1 << i));
+	}
+
+	/* combine shared connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			for (j = 0; j < max_device; j++) {
+				if (bios_connectors[j].valid && (i != j)) {
+					if (bios_connectors[i].line_mux ==
+					    bios_connectors[j].line_mux) {
+						/* make sure not to combine LVDS */
+						if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[i].line_mux = 53;
+							bios_connectors[i].ddc_bus.valid = false;
+							continue;
+						}
+						if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[j].line_mux = 53;
+							bios_connectors[j].ddc_bus.valid = false;
+							continue;
+						}
+						/* combine analog and digital for DVI-I */
+						if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+						    ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+							bios_connectors[i].devices |=
+								bios_connectors[j].devices;
+							bios_connectors[i].connector_type =
+								DRM_MODE_CONNECTOR_DVII;
+							if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
+								bios_connectors[i].hpd =
+									bios_connectors[j].hpd;
+							bios_connectors[j].valid = false;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/* add the connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			uint16_t connector_object_id =
+				atombios_get_connector_object_id(dev,
+						      bios_connectors[i].connector_type,
+						      bios_connectors[i].devices);
+			radeon_add_atom_connector(dev,
+						  bios_connectors[i].line_mux,
+						  bios_connectors[i].devices,
+						  bios_connectors[i].connector_type,
+						  &bios_connectors[i].ddc_bus,
+						  0,
+						  connector_object_id,
+						  &bios_connectors[i].hpd,
+						  &router);
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	kfree(bios_connectors);
+	return true;
+}
+
+union firmware_info {
+	ATOM_FIRMWARE_INFO info;
+	ATOM_FIRMWARE_INFO_V1_2 info_12;
+	ATOM_FIRMWARE_INFO_V1_3 info_13;
+	ATOM_FIRMWARE_INFO_V1_4 info_14;
+	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
+};
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
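+/* Fetch the DENTIST VCO frequency from the IGP system info table; the
+ * caller uses it as the display VCO on DCE4.1 parts.
+ */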
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+			&frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+			data_offset);
+		rdev->clock.vco_freq =
+			le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+	}
+}
+
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	union firmware_info *firmware_info;
+	uint8_t frev, crev;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint16_t data_offset;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		/* pixel clocks */
+		p1pll->reference_freq =
+		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		p1pll->reference_div = 0;
+
+		if ((frev < 2) && (crev < 2))
+			p1pll->pll_out_min =
+				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+		else
+			p1pll->pll_out_min =
+				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+		p1pll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+		if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+			p1pll->lcd_pll_out_min =
+				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_min == 0)
+				p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max =
+				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_max == 0)
+				p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		} else {
+			p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		}
+
+		if (p1pll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				p1pll->pll_out_min = 64800;
+			else
+				p1pll->pll_out_min = 20000;
+		}
+
+		p1pll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+		p1pll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
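+		/* both pixel PLLs share the same limits */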
+		*p2pll = *p1pll;
+
+		/* system clock */
+		if (ASIC_IS_DCE4(rdev))
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		else
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		spll->reference_div = 0;
+
+		spll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+		spll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+		/* ??? */
+		if (spll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				spll->pll_out_min = 64800;
+			else
+				spll->pll_out_min = 20000;
+		}
+
+		spll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+		spll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+		/* memory clock */
+		if (ASIC_IS_DCE4(rdev))
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		else
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		mpll->reference_div = 0;
+
+		mpll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+		mpll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+		/* ??? */
+		if (mpll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				mpll->pll_out_min = 64800;
+			else
+				mpll->pll_out_min = 20000;
+		}
+
+		mpll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+		mpll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+		rdev->clock.default_sclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+		rdev->clock.default_mclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+		if (ASIC_IS_DCE4(rdev)) {
+			rdev->clock.default_dispclk =
+				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+			if (rdev->clock.default_dispclk == 0) {
+				if (ASIC_IS_DCE6(rdev))
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+				else if (ASIC_IS_DCE5(rdev))
+					rdev->clock.default_dispclk = 54000; /* 540 MHz */
+				else
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+			}
+			/* set a reasonable default for DP */
+			if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+				DRM_INFO("Changing default dispclk from %d MHz to 600 MHz\n",
+					 rdev->clock.default_dispclk / 100);
+				rdev->clock.default_dispclk = 60000;
+			}
+			rdev->clock.dp_extclk =
+				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+			rdev->clock.current_dispclk = rdev->clock.default_dispclk;
+		}
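+		/* the display clock PLL reuses the pixel PLL limits */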
+		*dcpll = *p1pll;
+
+		rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+		if (rdev->clock.max_pixel_clock == 0)
+			rdev->clock.max_pixel_clock = 40000;
+
+		/* not technically a clock, but... */
+		rdev->mode_info.firmware_flags =
+			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
+		if (ASIC_IS_DCE8(rdev))
+			rdev->clock.vco_freq =
+				le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
+		else if (ASIC_IS_DCE5(rdev))
+			rdev->clock.vco_freq = rdev->clock.current_dispclk;
+		else if (ASIC_IS_DCE41(rdev))
+			radeon_atombios_get_dentist_vco_freq(rdev);
+		else
+			rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+		if (rdev->clock.vco_freq == 0)
+			rdev->clock.vco_freq = 360000;	/* 3.6 GHz */
+
+		return true;
+	}
+
+	return false;
+}
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	/* sideport is AMD only; RS600 is the Intel-platform IGP */
+	if (rdev->family == CHIP_RS600)
+		return false;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+				      data_offset);
+		switch (crev) {
+		case 1:
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
+				return true;
+			break;
+		case 2:
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
+				return true;
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+	}
+	return false;
+}
+
+bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+				   struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+	uint16_t data_offset;
+	struct _ATOM_TMDS_INFO *tmds_info;
+	uint8_t frev, crev;
+	uint16_t maxfreq;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		tmds_info =
+			(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+						   data_offset);
+
+		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+		for (i = 0; i < 4; i++) {
+			tmds->tmds_pll[i].freq =
+			    le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+			tmds->tmds_pll[i].value =
+			    tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].ucPLL_VCO_Gain & 0x3f) << 6;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].ucPLL_DutyCycle & 0xf) << 12;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].ucPLL_VoltageSwing & 0xf) << 16;
+
+			DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
+				  tmds->tmds_pll[i].freq,
+				  tmds->tmds_pll[i].value);
+
+			if (maxfreq == tmds->tmds_pll[i].freq) {
+				tmds->tmds_pll[i].freq = 0xffffffff;
+				break;
+			}
+		}
+		return true;
+	}
+	return false;
+}
+
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+	uint16_t data_offset, size;
+	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+	struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		ss_info =
+			(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+		ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
+			((u8 *)&ss_info->asSS_Info[0]);
+		for (i = 0; i < num_indices; i++) {
+			if (ss_assign->ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+				ss->type = ss_assign->ucSpreadSpectrumType;
+				ss->step = ss_assign->ucSS_Step;
+				ss->delay = ss_assign->ucSS_Delay;
+				ss->range = ss_assign->ucSS_Range;
+				ss->refdiv = ss_assign->ucRecommendedRef_Div;
+				return true;
+			}
+			ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
+				((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
+		}
+	}
+	return false;
+}
+
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+						 struct radeon_atom_ss *ss,
+						 int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any igp specific overrides */
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 8:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
+union asic_ss_info {
+	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+union asic_ss_assignment {
+	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id, u32 clock)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	uint16_t data_offset, size;
+	union asic_ss_info *ss_info;
+	union asic_ss_assignment *ss_assign;
+	uint8_t frev, crev;
+	int i, num_indices;
+
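+	/* engine/memory clock SS is only valid when the firmware
+	 * capability flags advertise it
+	 */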
+	if (id == ASIC_INTERNAL_MEMORY_SS) {
+		if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
+			return false;
+	}
+	if (id == ASIC_INTERNAL_ENGINE_SS) {
+		if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
+			return false;
+	}
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+
+		ss_info =
+			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v1.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+					ss->percentage_divider = 100;
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
+			}
+			break;
+		case 2:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v2.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+					ss->percentage_divider = 100;
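+					/* v2 engine/memory SS rates are in 10 Hz
+					 * units; convert to kHz
+					 */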
+					if ((crev == 2) &&
+					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
+					     (id == ASIC_INTERNAL_MEMORY_SS)))
+						ss->rate /= 100;
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
+			}
+			break;
+		case 3:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v3.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+					if (ss_assign->v3.ucSpreadSpectrumMode &
+					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+						ss->percentage_divider = 1000;
+					else
+						ss->percentage_divider = 100;
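+					/* engine/memory SS rates are in 10 Hz units;
+					 * convert to kHz
+					 */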
+					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
+					    (id == ASIC_INTERNAL_MEMORY_SS))
+						ss->rate /= 100;
+					if (rdev->flags & RADEON_IS_IGP)
+						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+			break;
+		}
+	}
+	return false;
+}
+
+union lvds_info {
+	struct _ATOM_LVDS_INFO info;
+	struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+	uint16_t data_offset, misc;
+	union lvds_info *lvds_info;
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		lvds_info =
+			(union lvds_info *)(mode_info->atom_context->bios + data_offset);
+		lvds = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+		if (!lvds)
+			return NULL;
+
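+		/* ATOM stores sync positions as an offset from the end of the
+		 * active region plus a width; convert to the absolute
+		 * hsync/vsync start and end the mode structs expect
+		 */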
+		lvds->native_mode.clock =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+		lvds->native_mode.hdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+		lvds->native_mode.vdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+		lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+		lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+		lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+		lvds->panel_pwr_delay =
+		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
+		lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
+		/* set crtc values */
+		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+		lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
+
+		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
+		/* parse the lcd record table */
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record;
+
+			if ((frev == 1) && (crev < 2))
+				/* absolute */
+				record = (u8 *)(mode_info->atom_context->bios +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			else
+				/* relative */
+				record = (u8 *)(mode_info->atom_context->bios +
+						data_offset +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = kmalloc(edid_size, GFP_KERNEL);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid)) {
+								rdev->mode_info.bios_hardcoded_edid = edid;
+								rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+							} else
+								kfree(edid);
+						}
+					}
+					record += fake_edid_record->ucFakeEDIDLength ?
+						fake_edid_record->ucFakeEDIDLength + 2 :
+						sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					/* vbios values are little endian */
+					lvds->native_mode.width_mm =
+						le16_to_cpu(panel_res_record->usHSize);
+					lvds->native_mode.height_mm =
+						le16_to_cpu(panel_res_record->usVSize);
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
+	}
+	return lvds;
+}
+
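+/*
+ * Read the DAC1 bandgap/DAC adjustment values from the CompassionateData
+ * table and pack them into ps2_pdac_adj for the primary DAC.
+ */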
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
+
+		p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
+
+		if (!p_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC1_BG_Adjustment;
+		dac = dac_info->ucDAC1_DAC_Adjustment;
+		p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+	}
+	return p_dac;
+}
+
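+/*
+ * Fill @mode with analog TV timing entry @index from the AnalogTV_Info
+ * table.  crev 1 tables store explicit CRTC timings, crev 2 tables store
+ * DTD-format timings.  Returns false if the table is missing or @index
+ * is out of range.
+ */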
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	ATOM_ANALOG_TV_INFO *tv_info;
+	ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2;
+	ATOM_DTD_FORMAT *dtd_timings;
+	int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	u8 frev, crev;
+	u16 data_offset, misc;
+
+	if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+				    &frev, &crev, &data_offset))
+		return false;
+
+	switch (crev) {
+	case 1:
+		tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING)
+			return false;
+
+		mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+		mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+		mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+		mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+		mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+		mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+		mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->crtc_clock = mode->clock =
+			le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+
+		if (index == 1) {
+			/* PAL timings appear to have wrong values for totals */
+			mode->crtc_htotal -= 1;
+			mode->crtc_vtotal -= 1;
+		}
+		break;
+	case 2:
+		tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
+			return false;
+
+		dtd_timings = &tv_info_v1_2->aModeTimings[index];
+		mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHBlanking_Time);
+		mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+		mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHSyncOffset);
+		mode->crtc_hsync_end = mode->crtc_hsync_start +
+			le16_to_cpu(dtd_timings->usHSyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVBlanking_Time);
+		mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+		mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVSyncOffset);
+		mode->crtc_vsync_end = mode->crtc_vsync_start +
+			le16_to_cpu(dtd_timings->usVSyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->crtc_clock = mode->clock =
+			le16_to_cpu(dtd_timings->usPixClk) * 10;
+		break;
+	}
+	return true;
+}
+
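+/*
+ * Return the boot-up default TV standard from the AnalogTV_Info table,
+ * falling back to NTSC if the table is absent or the value is unknown.
+ */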
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	struct _ATOM_ANALOG_TV_INFO *tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+			(mode_info->atom_context->bios + data_offset);
+
+		switch (tv_info->ucTV_BootUpDefaultStandard) {
+		case ATOM_TV_NTSC:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+			break;
+		case ATOM_TV_NTSCJ:
+			tv_std = TV_STD_NTSC_J;
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+			break;
+		case ATOM_TV_PAL:
+			tv_std = TV_STD_PAL;
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
+			break;
+		case ATOM_TV_PALM:
+			tv_std = TV_STD_PAL_M;
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+			break;
+		case ATOM_TV_PALN:
+			tv_std = TV_STD_PAL_N;
+			DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
+			break;
+		case ATOM_TV_PALCN:
+			tv_std = TV_STD_PAL_CN;
+			DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
+			break;
+		case ATOM_TV_PAL60:
+			tv_std = TV_STD_PAL_60;
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+			break;
+		case ATOM_TV_SECAM:
+			tv_std = TV_STD_SECAM;
+			DRM_DEBUG_KMS("Default TV standard: SECAM\n");
+			break;
+		default:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+			break;
+		}
+	}
+	return tv_std;
+}
+
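+/*
+ * Read the DAC2 bandgap/DAC adjustments for PS2, PAL and NTSC output
+ * from the CompassionateData table for the TV DAC.
+ */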
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
+
+		tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+		if (!tv_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+		dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+		tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+		dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+		tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	}
+	return tv_dac;
+}
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm63", /* lm64 */
+	"f75375",
+	"asc7xxx",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm63", /* lm64 */
+	"f75375",
+	"RV6xx",
+	"RV770",
+	"adt7473",
+	"NONE",
+	"External GPIO",
+	"Evergreen",
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
+	"Southern Islands",
+	"lm96163",
+	"Sea Islands",
+};
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
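+/*
+ * Translate the misc/misc2 flag words of a v1-v3 power table entry into
+ * a power state type.  Later checks deliberately override earlier ones
+ * (e.g. a 3D-acceleration state wins over a battery state), hence the
+ * "order matters" note below.
+ */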
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+						 int state_index,
+						 u32 misc, u32 misc2)
+{
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	/* order matters! */
+	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_POWERSAVE;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	}
+	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[0];
+	} else if (state_index == 0) {
+		rdev->pm.power_state[state_index].clock_info[0].flags |=
+			RADEON_PM_MODE_NO_DISPLAY;
+	}
+}
+
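+/*
+ * Parse the legacy (frev 1-3) PowerPlayInfo table: one clock mode per
+ * state, ordered low to high, with the default state usually last.
+ * Also registers an external I2C thermal chip if the table describes
+ * one.  Returns the number of states parsed.
+ */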
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	u32 misc, misc2 = 0;
+	int num_modes = 0, i;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* add the i2c bus for thermal/fan chip */
+	if ((power_info->info.ucOverdriveThermalController > 0) &&
+	    (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
+		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+			 power_info->info.ucOverdriveControllerAddress >> 1);
+		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		if (rdev->pm.i2c_bus) {
+			struct i2c_board_info info = { };
+			const char *name = thermal_controller_names[power_info->info.
+								    ucOverdriveThermalController];
+			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+			strlcpy(info.type, name, sizeof(info.type));
+			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+		}
+	}
+	num_modes = power_info->info.ucNumOfPowerModeEntries;
+	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	if (num_modes == 0)
+		return state_index;
+	rdev->pm.power_state = kcalloc(num_modes,
+				       sizeof(struct radeon_power_state),
+				       GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* last mode is usually default, array is low to high */
+	for (i = 0; i < num_modes; i++) {
+		rdev->pm.power_state[state_index].clock_info =
+			kcalloc(1, sizeof(struct radeon_pm_clock_info),
+				GFP_KERNEL);
+		if (!rdev->pm.power_state[state_index].clock_info)
+			return state_index;
+		rdev->pm.power_state[state_index].num_clock_modes = 1;
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+		switch (frev) {
+		case 1:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_atombios_lookup_gpio(rdev,
+							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+			state_index++;
+			break;
+		case 2:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_atombios_lookup_gpio(rdev,
+							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		case 3:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_atombios_lookup_gpio(rdev,
+							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+						true;
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+				}
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		}
+	}
+	/* last mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[state_index - 1].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index - 1;
+		rdev->pm.power_state[state_index - 1].default_clock_mode =
+			&rdev->pm.power_state[state_index - 1].clock_info[0];
+		/* state_index points one past the last parsed state here,
+		 * so index the default (last) state explicitly */
+		rdev->pm.power_state[state_index - 1].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+		rdev->pm.power_state[state_index - 1].misc = 0;
+		rdev->pm.power_state[state_index - 1].misc2 = 0;
+	}
+	return state_index;
+}
+
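+/*
+ * Decode the pplib thermal controller descriptor: internal controllers
+ * just set int_thermal_type, while external I2C chips additionally get
+ * an i2c_board_info registered on the fan/thermal bus.
+ */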
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+							 ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+			rdev->pm.no_fan = true;
+		rdev->pm.fan_pulses_per_revolution =
+			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+		if (rdev->pm.fan_pulses_per_revolution) {
+			rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
+			rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+		}
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
+		} else if (controller->ucType ==
+			   ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+			DRM_INFO("External GPIO thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+		} else if (controller->ucType ==
+			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+		} else if (controller->ucType ==
+			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		} else {
+			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+				 controller->ucType,
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		}
+	}
+}
+
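+/*
+ * Fetch the boot-up VDDC/VDDCI/MVDD voltages from the FirmwareInfo
+ * table; vddci and mvdd are only present on frev 2, crev >= 2 tables
+ * and are left at 0 otherwise.
+ */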
+void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+
+	*vddc = 0;
+	*vddci = 0;
+	*mvdd = 0;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		if ((frev == 2) && (crev >= 2)) {
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+		}
+	}
+}
+
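+/*
+ * Apply the non-clock (classification/caps) info to a pplib power
+ * state.  The boot state becomes the default state; on most parts its
+ * clock entries are patched with the firmware default sclk/mclk and
+ * voltages.
+ */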
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+						       int state_index, int mode_index,
+						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	int j;
+	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+	u16 vddc, vddci, mvdd;
+
+	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	rdev->pm.power_state[state_index].pcie_lanes =
+		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	}
+	rdev->pm.power_state[state_index].flags = 0;
+	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		rdev->pm.power_state[state_index].flags |=
+			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+		if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+			rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
+		} else {
+			u16 max_vddci = 0;
+
+			if (ASIC_IS_DCE4(rdev))
+				radeon_atom_get_max_voltage(rdev,
+							    SET_VOLTAGE_TYPE_ASIC_VDDCI,
+							    &max_vddci);
+			/* patch the table values with the default sclk/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[j].sclk =
+					rdev->clock.default_sclk;
+				if (vddc)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+						vddc;
+				if (max_vddci)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.vddci =
+						max_vddci;
+			}
+		}
+	}
+}
+
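+/*
+ * Decode one pplib clock info entry into sclk/mclk/voltage using the
+ * family-specific layout of the union.  Returns false for invalid
+ * (zero-clock) entries so the caller can skip them.
+ */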
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+						   int state_index, int mode_index,
+						   union pplib_clock_info *clock_info)
+{
+	u32 sclk, mclk;
+	u16 vddc;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family >= CHIP_PALM) {
+			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		} else {
+			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		}
+	} else if (rdev->family >= CHIP_BONAIRE) {
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_NONE;
+	} else if (rdev->family >= CHIP_TAHITI) {
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->si.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->si.usVDDCI);
+	} else if (rdev->family >= CHIP_CEDAR) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->evergreen.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->evergreen.usVDDCI);
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->r600.usVDDC);
+	}
+
+	/* patch up vddc if necessary */
+	switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
+	case ATOM_VIRTUAL_VOLTAGE_ID0:
+	case ATOM_VIRTUAL_VOLTAGE_ID1:
+	case ATOM_VIRTUAL_VOLTAGE_ID2:
+	case ATOM_VIRTUAL_VOLTAGE_ID3:
+	case ATOM_VIRTUAL_VOLTAGE_ID4:
+	case ATOM_VIRTUAL_VOLTAGE_ID5:
+	case ATOM_VIRTUAL_VOLTAGE_ID6:
+	case ATOM_VIRTUAL_VOLTAGE_ID7:
+		if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
+					     rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
+					     &vddc) == 0)
+			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+		break;
+	default:
+		break;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* skip invalid modes */
+		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+			return false;
+	} else {
+		/* skip invalid modes */
+		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+			return false;
+	}
+	return true;
+}
+
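+/*
+ * Parse frev 4/5 pplib tables: a flat state array where each fixed-size
+ * state entry embeds the indices of its clock info entries.  Returns
+ * the number of valid states parsed.
+ */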
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	if (power_info->pplib.ucNumStates == 0)
+		return state_index;
+	rdev->pm.power_state = kcalloc(power_info->pplib.ucNumStates,
+				       sizeof(struct radeon_power_state),
+				       GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* first mode is usually default, followed by low to high */
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		rdev->pm.power_state[i].clock_info =
+			kcalloc((power_info->pplib.ucStateEntrySize - 1) ?
+				(power_info->pplib.ucStateEntrySize - 1) : 1,
+				sizeof(struct radeon_pm_clock_info),
+				GFP_KERNEL);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (power_state->v1.ucClockStateIndices[j] *
+					  power_info->pplib.ucClockInfoSize));
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
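+/*
+ * Parse frev 6 pplib tables, which split the data into separate State,
+ * ClockInfo and NonClockInfo arrays.  State entries are variable sized,
+ * which is why power_state_offset advances by 2 + ucNumDPMLevels bytes
+ * per entry.
+ */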
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, non_clock_array_index, clock_array_index;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+	if (state_array->ucNumEntries == 0)
+		return state_index;
+	rdev->pm.power_state = kcalloc(state_array->ucNumEntries,
+				       sizeof(struct radeon_power_state),
+				       GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		rdev->pm.power_state[i].clock_info =
+			kcalloc(power_state->v2.ucNumDPMLevels ?
+				power_state->v2.ucNumDPMLevels : 1,
+				sizeof(struct radeon_pm_clock_info),
+				GFP_KERNEL);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_state->v2.ucNumDPMLevels) {
+			for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+				clock_array_index = power_state->v2.clockInfoIndex[j];
+				clock_info = (union pplib_clock_info *)
+					&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
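+/*
+ * Top-level power mode parser: dispatch on the PowerPlayInfo table
+ * revision and, if nothing usable is found, synthesize a single default
+ * state from the firmware boot clocks.
+ */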
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	int state_index = 0;
+
+	rdev->pm.default_power_state_index = -1;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		switch (frev) {
+		case 1:
+		case 2:
+		case 3:
+			state_index = radeon_atombios_parse_power_table_1_3(rdev);
+			break;
+		case 4:
+		case 5:
+			state_index = radeon_atombios_parse_power_table_4_5(rdev);
+			break;
+		case 6:
+			state_index = radeon_atombios_parse_power_table_6(rdev);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (state_index == 0) {
+		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+		if (rdev->pm.power_state) {
+			rdev->pm.power_state[0].clock_info =
+				kcalloc(1,
+				        sizeof(struct radeon_pm_clock_info),
+				        GFP_KERNEL);
+			if (rdev->pm.power_state[0].clock_info) {
+				/* add the default mode */
+				rdev->pm.power_state[state_index].type =
+					POWER_STATE_TYPE_DEFAULT;
+				rdev->pm.power_state[state_index].num_clock_modes = 1;
+				rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+				rdev->pm.power_state[state_index].default_clock_mode =
+					&rdev->pm.power_state[state_index].clock_info[0];
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+				rdev->pm.power_state[state_index].pcie_lanes = 16;
+				rdev->pm.default_power_state_index = state_index;
+				rdev->pm.power_state[state_index].flags = 0;
+				state_index++;
+			}
+		}
+	}
+
+	rdev->pm.num_power_states = state_index;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	if (rdev->pm.default_power_state_index >= 0)
+		rdev->pm.current_vddc =
+			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	else
+		rdev->pm.current_vddc = 0;
+}
+
+union get_clock_dividers {
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
+	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
+};
+
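+/*
+ * Execute the ComputeMemoryEnginePLL command table to compute PLL
+ * dividers for @clock.  The argument layout and the fields returned
+ * vary with the table crev (roughly one per ASIC generation).
+ */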
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+				   u8 clock_type,
+				   u32 clock,
+				   bool strobe_mode,
+				   struct atom_clock_dividers *dividers)
+{
+	union get_clock_dividers args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		/* r4xx, r5xx */
+		args.v1.ucAction = clock_type;
+		args.v1.ulClock = cpu_to_le32(clock);	/* 10 khz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->post_div = args.v1.ucPostDiv;
+		dividers->fb_div = args.v1.ucFbDiv;
+		dividers->enable_post_div = true;
+		break;
+	case 2:
+	case 3:
+	case 5:
+		/* r6xx, r7xx, evergreen, ni, si */
+		if (rdev->family <= CHIP_RV770) {
+			args.v2.ucAction = clock_type;
+			args.v2.ulClock = cpu_to_le32(clock);	/* 10 khz */
+
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v2.ucPostDiv;
+			dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+			dividers->ref_div = args.v2.ucAction;
+			if (rdev->family == CHIP_RV770) {
+				dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+					true : false;
+				dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+			} else
+				dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+		} else {
+			if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+				args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+				dividers->post_div = args.v3.ucPostDiv;
+				dividers->enable_post_div = (args.v3.ucCntlFlag &
+							     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+				dividers->enable_dithen = (args.v3.ucCntlFlag &
+							   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+				dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+				dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+				dividers->ref_div = args.v3.ucRefDiv;
+				dividers->vco_mode = (args.v3.ucCntlFlag &
+						      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+			} else {
+				/* for SI we use ComputeMemoryClockParam for memory plls */
+				if (rdev->family >= CHIP_TAHITI)
+					return -EINVAL;
+				args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+				if (strobe_mode)
+					args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+				dividers->post_div = args.v5.ucPostDiv;
+				dividers->enable_post_div = (args.v5.ucCntlFlag &
+							     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+				dividers->enable_dithen = (args.v5.ucCntlFlag &
+							   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+				dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+				dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+				dividers->ref_div = args.v5.ucRefDiv;
+				dividers->vco_mode = (args.v5.ucCntlFlag &
+						      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+			}
+		}
+		break;
+	case 4:
+		/* fusion */
+		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
+		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+		break;
+	case 6:
+		/* CI */
+		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
+		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
+		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
+		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
+		dividers->ref_div = args.v6_out.ucPllRefDiv;
+		dividers->post_div = args.v6_out.ucPllPostDiv;
+		dividers->flags = args.v6_out.ucPllCntlFlag;
+		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
+		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
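+/*
+ * SI memory PLL setup: execute ComputeMemoryClockParam and unpack the
+ * divider, DLL and control-flag fields into @mpll_param.
+ */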
+int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
+					u32 clock,
+					bool strobe_mode,
+					struct atom_mpll_param *mpll_param)
+{
+	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(mpll_param, 0, sizeof(struct atom_mpll_param));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (frev) {
+	case 2:
+		switch (crev) {
+		case 1:
+			/* SI */
+			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
+			args.ucInputFlag = 0;
+			if (strobe_mode)
+				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
+
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
+			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
+			mpll_param->post_div = args.ucPostDiv;
+			mpll_param->dll_speed = args.ucDllSpeed;
+			mpll_param->bwcntl = args.ucBWCntl;
+			mpll_param->vco_mode =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
+			mpll_param->yclk_sel =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
+			mpll_param->qdr =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
+			mpll_param->half_rate =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+	args.ucEnable = enable;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+	GET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnEngineClock);
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+	GET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnMemoryClock);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+				  uint32_t eng_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+				  uint32_t mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
+					 u32 eng_clock, u32 mem_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+	u32 tmp;
+
+	memset(&args, 0, sizeof(args));
+
+	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
+	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
+
+	args.ulTargetEngineClock = cpu_to_le32(tmp);
+	if (mem_clock)
+		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_update_memory_dll(struct radeon_device *rdev,
+				   u32 mem_clock)
+{
+	u32 args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+
+	args = cpu_to_le32(mem_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_ac_timing(struct radeon_device *rdev,
+			       u32 mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+	u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24);
+
+	args.ulTargetMemoryClock = cpu_to_le32(tmp);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union set_voltage {
+	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+	struct _SET_VOLTAGE_PARAMETERS v1;
+	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+};
+
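+/*
+ * Program a voltage rail through the SetVoltage command table.  The
+ * argument layout depends on the table crev; 0xff01 is a flag value
+ * rather than a real voltage level, so it is ignored here.
+ */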
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev, volt_index = voltage_level;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* 0xff01 is a flag rather than an actual voltage */
+	if (voltage_level == 0xff01)
+		return;
+
+	switch (crev) {
+	case 1:
+		args.v1.ucVoltageType = voltage_type;
+		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+		args.v1.ucVoltageIndex = volt_index;
+		break;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
+						      u16 *voltage,
+						      u16 leakage_idx)
+{
+	return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+					  u16 *leakage_id)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 3:
+	case 4:
+		args.v3.ucVoltageType = 0;
+		args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+		args.v3.usVoltageLevel = 0;
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
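+/*
+ * Translate a virtual (leakage) voltage id into real VDDC/VDDCI levels
+ * using the leakage bins in the ASIC_ProfilingInfo v2.1 table.
+ */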
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+							 u16 *vddc, u16 *vddci,
+							 u16 virtual_voltage_id,
+							 u16 vbios_voltage_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i, j;
+	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+
+	*vddc = 0;
+	*vddci = 0;
+
+	if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				    &frev, &crev, &data_offset))
+		return -EINVAL;
+
+	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+		(rdev->mode_info.atom_context->bios + data_offset);
+
+	switch (frev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		switch (crev) {
+		case 1:
+			if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
+				return -EINVAL;
+			leakage_bin = (u16 *)
+				(rdev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usLeakageBinArrayOffset));
+			vddc_id_buf = (u16 *)
+				(rdev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
+			vddc_buf = (u16 *)
+				(rdev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
+			vddci_id_buf = (u16 *)
+				(rdev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
+			vddci_buf = (u16 *)
+				(rdev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
+
+			if (profile->ucElbVDDC_Num > 0) {
+				for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+					if (vddc_id_buf[i] == virtual_voltage_id) {
+						for (j = 0; j < profile->ucLeakageBinNum; j++) {
+							if (vbios_voltage_id <= leakage_bin[j]) {
+								*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+								break;
+							}
+						}
+						break;
+					}
+				}
+			}
+			if (profile->ucElbVDDCI_Num > 0) {
+				for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+					if (vddci_id_buf[i] == virtual_voltage_id) {
+						for (j = 0; j < profile->ucLeakageBinNum; j++) {
+							if (vbios_voltage_id <= leakage_bin[j]) {
+								*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+								break;
+							}
+						}
+						break;
+					}
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+union get_voltage_info {
+	struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+	struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
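+
+/*
+ * The GetVoltageInfo command reuses its parameter space for output, so the
+ * union above lets radeon_atom_get_voltage_evv() pass the V1_2 input block
+ * and read the EVV result back from the same storage once
+ * atom_execute_table() returns.
+ */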
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+				u16 virtual_voltage_id,
+				u16 *voltage)
+{
+	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+	u32 entry_id;
+	u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+	union get_voltage_info args;
+
+	for (entry_id = 0; entry_id < count; entry_id++) {
+		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+		    virtual_voltage_id)
+			break;
+	}
+
+	if (entry_id >= count)
+		return -EINVAL;
+
+	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+	args.in.ulSCLKFreq =
+		cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+	return 0;
+}
+
+int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
+					  u16 voltage_level, u8 voltage_type,
+					  u32 *gpio_value, u32 *gpio_mask)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
+
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*gpio_value = le32_to_cpu(*(u32 *)&args.v2);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+union voltage_object_info {
+	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
+};
+
+union voltage_object {
+	struct _ATOM_VOLTAGE_OBJECT v1;
+	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
+	union _ATOM_VOLTAGE_OBJECT_V3 v3;
+};
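+
+/*
+ * The VoltageObjectInfo table packs variable-length records back to back.
+ * Each lookup helper below advances through the array by adding the current
+ * record's size to a running offset until it finds a matching voltage type
+ * (and, for v3, voltage mode) or walks past usStructureSize.
+ */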
+
+static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1,
+							  u8 voltage_type)
+{
+	u32 size = le16_to_cpu(v1->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]);
+	u8 *start = (u8 *)v1;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset);
+		if (vo->ucVoltageType == voltage_type)
+			return vo;
+		offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) +
+			vo->asFormula.ucNumOfVoltageEntries;
+	}
+	return NULL;
+}
+
+static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2,
+							     u8 voltage_type)
+{
+	u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
+	u8 *start = (u8 *)v2;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
+		if (vo->ucVoltageType == voltage_type)
+			return vo;
+		offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) +
+			(vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY));
+	}
+	return NULL;
+}
+
+static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
+							     u8 voltage_type, u8 voltage_mode)
+{
+	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
+	u8 *start = (u8 *)v3;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
+		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
+		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
+			return vo;
+		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
+	}
+	return NULL;
+}
+
+bool
+radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
+			    u8 voltage_type, u8 voltage_mode)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+		case 2:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
+				if (voltage_object &&
+				    (voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
+					return true;
+				break;
+			case 2:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+				if (voltage_object &&
+				    (voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
+					return true;
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return false;
+			}
+			break;
+		case 3:
+			switch (crev) {
+			case 1:
+				if (atom_lookup_voltage_object_v3(&voltage_info->v3,
+								  voltage_type, voltage_mode))
+					return true;
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return false;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return false;
+		}
+
+	}
+	return false;
+}
+
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type,
+								      VOLTAGE_OBJ_SVID2);
+				if (voltage_object) {
+					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return 0;
+}
+
+int radeon_atom_get_max_voltage(struct radeon_device *rdev,
+				u8 voltage_type, u16 *max_voltage)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (crev) {
+		case 1:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA *formula =
+					&voltage_object->v1.asFormula;
+				if (formula->ucFlag & 1)
+					*max_voltage =
+						le16_to_cpu(formula->usVoltageBaseLevel) +
+						formula->ucNumOfVoltageEntries / 2 *
+						le16_to_cpu(formula->usVoltageStep);
+				else
+					*max_voltage =
+						le16_to_cpu(formula->usVoltageBaseLevel) +
+						(formula->ucNumOfVoltageEntries - 1) *
+						le16_to_cpu(formula->usVoltageStep);
+				return 0;
+			}
+			break;
+		case 2:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA_V2 *formula =
+					&voltage_object->v2.asFormula;
+				if (formula->ucNumOfVoltageEntries) {
+					VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
+						((u8 *)&formula->asVIDAdjustEntries[0] +
+						 (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
+					*max_voltage =
+						le16_to_cpu(lut->usVoltageValue);
+					return 0;
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_get_min_voltage(struct radeon_device *rdev,
+				u8 voltage_type, u16 *min_voltage)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (crev) {
+		case 1:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA *formula =
+					&voltage_object->v1.asFormula;
+				*min_voltage =
+					le16_to_cpu(formula->usVoltageBaseLevel);
+				return 0;
+			}
+			break;
+		case 2:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA_V2 *formula =
+					&voltage_object->v2.asFormula;
+				if (formula->ucNumOfVoltageEntries) {
+					*min_voltage =
+						le16_to_cpu(formula->asVIDAdjustEntries[0].usVoltageValue);
+					return 0;
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_get_voltage_step(struct radeon_device *rdev,
+				 u8 voltage_type, u16 *voltage_step)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (crev) {
+		case 1:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA *formula =
+					&voltage_object->v1.asFormula;
+				if (formula->ucFlag & 1)
+					*voltage_step =
+						(le16_to_cpu(formula->usVoltageStep) + 1) / 2;
+				else
+					*voltage_step =
+						le16_to_cpu(formula->usVoltageStep);
+				return 0;
+			}
+			break;
+		case 2:
+			return -EINVAL;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
+
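+/*
+ * Round a nominal voltage down to the nearest step the hardware exposes.
+ * Illustrative values: with min = 900 mV, max = 1100 mV and step = 25 mV,
+ * a nominal request of 1010 mV yields
+ * 900 + ((1010 - 900) / 25) * 25 = 1000 mV.
+ */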
+int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
+				      u8 voltage_type,
+				      u16 nominal_voltage,
+				      u16 *true_voltage)
+{
+	u16 min_voltage, max_voltage, voltage_step;
+
+	if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
+		return -EINVAL;
+	if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
+		return -EINVAL;
+	if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
+		return -EINVAL;
+
+	if (nominal_voltage <= min_voltage)
+		*true_voltage = min_voltage;
+	else if (nominal_voltage >= max_voltage)
+		*true_voltage = max_voltage;
+	else
+		*true_voltage = min_voltage +
+			((nominal_voltage - min_voltage) / voltage_step) *
+			voltage_step;
+
+	return 0;
+}
+
+int radeon_atom_get_voltage_table(struct radeon_device *rdev,
+				  u8 voltage_type, u8 voltage_mode,
+				  struct atom_voltage_table *voltage_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i, ret;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+		case 2:
+			switch (crev) {
+			case 1:
+				DRM_ERROR("old table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			case 2:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+				if (voltage_object) {
+					ATOM_VOLTAGE_FORMULA_V2 *formula =
+						&voltage_object->v2.asFormula;
+					VOLTAGE_LUT_ENTRY *lut;
+					if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					lut = &formula->asVIDAdjustEntries[0];
+					for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(lut->usVoltageValue);
+						ret = radeon_atom_get_voltage_gpio_settings(rdev,
+											    voltage_table->entries[i].value,
+											    voltage_type,
+											    &voltage_table->entries[i].smio_low,
+											    &voltage_table->mask_low);
+						if (ret)
+							return ret;
+						lut = (VOLTAGE_LUT_ENTRY *)
+							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
+					}
+					voltage_table->count = formula->ucNumOfVoltageEntries;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type, voltage_mode);
+				if (voltage_object) {
+					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
+						&voltage_object->v3.asGpioVoltageObj;
+					VOLTAGE_LUT_ENTRY_V2 *lut;
+					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					lut = &gpio->asVolGpioLut[0];
+					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(lut->usVoltageValue);
+						voltage_table->entries[i].smio_low =
+							le32_to_cpu(lut->ulVoltageId);
+						lut = (VOLTAGE_LUT_ENTRY_V2 *)
+							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
+					}
+					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
+					voltage_table->count = gpio->ucGpioEntryNum;
+					voltage_table->phase_delay = gpio->ucPhaseDelay;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+union vram_info {
+	struct _ATOM_VRAM_INFO_V3 v1_3;
+	struct _ATOM_VRAM_INFO_V4 v1_4;
+	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
+};
+
+int radeon_atom_get_memory_info(struct radeon_device *rdev,
+				u8 module_index, struct atom_memory_info *mem_info)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, i;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+
+	memset(mem_info, 0, sizeof(struct atom_memory_info));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 3:
+				/* r6xx */
+				if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V3 *vram_module =
+						(ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V3 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
+					}
+					mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			case 4:
+				/* r7xx, evergreen */
+				if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V4 *vram_module =
+						(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V4 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		case 2:
+			switch (crev) {
+			case 1:
+				/* ni */
+				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V7 *vram_module =
+						(ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V7 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
+				     bool gddr5, u8 module_index,
+				     struct atom_memory_clock_range_table *mclk_range_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, i;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+	u32 mem_timing_size = gddr5 ?
+		sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
+
+	memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 3:
+				DRM_ERROR("old table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			case 4:
+				/* r7xx, evergreen */
+				if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V4 *vram_module =
+						(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+					ATOM_MEMORY_TIMING_FORMAT *format;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V4 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mclk_range_table->num_entries = (u8)
+						((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
+						 mem_timing_size);
+					format = &vram_module->asMemTiming[0];
+					for (i = 0; i < mclk_range_table->num_entries; i++) {
+						mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
+						format = (ATOM_MEMORY_TIMING_FORMAT *)
+							((u8 *)format + mem_timing_size);
+					}
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		case 2:
+			DRM_ERROR("new table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+#define MEM_ID_MASK           0xff000000
+#define MEM_ID_SHIFT          24
+#define CLOCK_RANGE_MASK      0x00ffffff
+#define CLOCK_RANGE_SHIFT     0
+#define LOW_NIBBLE_MASK       0xf
+#define DATA_EQU_PREV         0
+#define DATA_FROM_TABLE       4
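+
+/*
+ * Each memory-setting data block opens with a 32-bit header word: bits
+ * 31:24 carry the memory module ID and bits 23:0 the top of the clock
+ * range the block applies to.  Illustrative value: a header of 0x0200c350
+ * selects module 2 with a clock ceiling of 0xc350 (50000) in the driver's
+ * usual 10 kHz clock units, i.e. 500 MHz.
+ */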
+
+int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
+				  u8 module_index,
+				  struct atom_mc_reg_table *reg_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
+	u32 i = 0, j;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+
+	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			DRM_ERROR("old table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		case 2:
+			switch (crev) {
+			case 1:
+				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
+					ATOM_INIT_REG_BLOCK *reg_block =
+						(ATOM_INIT_REG_BLOCK *)
+						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
+					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
+						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
+						((u8 *)reg_block + (2 * sizeof(u16)) +
+						 le16_to_cpu(reg_block->usRegIndexTblSize));
+					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
+					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
+							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
+					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
+						return -EINVAL;
+					while (i < num_entries) {
+						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
+							break;
+						reg_table->mc_reg_address[i].s1 =
+							(u16)(le16_to_cpu(format->usRegIndex));
+						reg_table->mc_reg_address[i].pre_reg_data =
+							(u8)(format->ucPreRegDataLength);
+						i++;
+						format = (ATOM_INIT_REG_INDEX_FORMAT *)
+							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
+					}
+					reg_table->last = i;
+					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
+					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
+						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+								>> MEM_ID_SHIFT);
+						if (module_index == t_mem_id) {
+							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
+								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+								      >> CLOCK_RANGE_SHIFT);
+							for (i = 0, j = 1; i < reg_table->last; i++) {
+								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+									j++;
+								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+								}
+							}
+							num_ranges++;
+						}
+						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
+							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
+					}
+					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
+						return -EINVAL;
+					reg_table->num_entries = num_ranges;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_2_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	/* let the bios control the backlight */
+	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
+	/* clear the vbios dpms state */
+	if (ASIC_IS_DCE4(rdev))
+		bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+}
+
+void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
+}
+
+void radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
+}
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	else
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock) {
+		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
+		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	else
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+			bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CV connected\n");
+			bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+		} else {
+			DRM_DEBUG_KMS("CV disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CV_MASK;
+			bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_0_scratch |= ATOM_S0_LCD1;
+			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_LCD1;
+			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP1;
+			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP1;
+			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP2;
+			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP2;
+			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP3 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP3;
+			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+		} else {
+			DRM_DEBUG_KMS("DFP3 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP3;
+			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP4 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP4;
+			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+		} else {
+			DRM_DEBUG_KMS("DFP4 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP4;
+			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP5 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP5;
+			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+		} else {
+			DRM_DEBUG_KMS("DFP5 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP5;
+			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP6 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP6;
+			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+		} else {
+			DRM_DEBUG_KMS("DFP6 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP6;
+			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+}
+
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_3_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+	else
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 18);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 24);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 16);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 20);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 17);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 19);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 23);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 25);
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+	else
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_2_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 0000000..fa5fada
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for hybrid platforms with both Intel and ATI GPUs
+ */
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "radeon_acpi.h"
+
+struct radeon_atpx_functions {
+	bool px_params;
+	bool power_cntl;
+	bool disp_mux_cntl;
+	bool i2c_mux_cntl;
+	bool switch_start;
+	bool switch_end;
+	bool disp_connectors_mapping;
+	bool disp_detection_ports;
+};
+
+struct radeon_atpx {
+	acpi_handle handle;
+	struct radeon_atpx_functions functions;
+	bool is_hybrid;
+	bool dgpu_req_power_for_displays;
+};
+
+static struct radeon_atpx_priv {
+	bool atpx_detected;
+	bool bridge_pm_usable;
+	/* ACPI handle for the device and its ATPX state */
+	acpi_handle dhandle;
+	struct radeon_atpx atpx;
+} radeon_atpx_priv;
+
+struct atpx_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atpx_px_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_flags;	/* which flags are valid */
+	u32 flags;		/* flags */
+} __packed;
+
+struct atpx_power_control {
+	u16 size;
+	u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+	u16 size;
+	u16 mux;
+} __packed;
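+
+/*
+ * Every ATPX input buffer opens with a 16-bit size field that counts
+ * itself, so atpx_power_control is 3 bytes on the wire (u16 size + u8
+ * dgpu_state) and atpx_mux is 4 (u16 size + u16 mux) -- the literal
+ * values the callers below place in input.size.
+ */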
+
+bool radeon_has_atpx(void)
+{
+	return radeon_atpx_priv.atpx_detected;
+}
+
+bool radeon_has_atpx_dgpu_power_cntl(void)
+{
+	return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void)
+{
+	return radeon_atpx_priv.atpx.is_hybrid;
+}
+
+bool radeon_atpx_dgpu_req_power_for_displays(void)
+{
+	return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
+/**
+ * radeon_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atpx_arg_elements[2];
+	struct acpi_object_list atpx_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atpx_arg.count = 2;
+	atpx_arg.pointer = &atpx_arg_elements[0];
+
+	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atpx_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atpx_arg_elements[1].buffer.length = params->length;
+		atpx_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atpx_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATPX is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		printk("failed to evaluate ATPX got %s\n",
+		       acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
+{
+	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+	f->disp_connection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
+
+/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+	u32 valid_bits = 0;
+
+	if (atpx->functions.px_params) {
+		union acpi_object *info;
+		struct atpx_px_params output;
+		size_t size;
+
+		info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+		if (!info)
+			return -EIO;
+
+		memset(&output, 0, sizeof(output));
+
+		size = *(u16 *) info->buffer.pointer;
+		if (size < 10) {
+			printk("ATPX buffer is too small: %zu\n", size);
+			kfree(info);
+			return -EINVAL;
+		}
+		size = min(sizeof(output), size);
+
+		memcpy(&output, info->buffer.pointer, size);
+
+		valid_bits = output.flags & output.valid_flags;
+
+		kfree(info);
+	}
+
+	/* if separate mux flag is set, mux controls are required */
+	if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+		atpx->functions.i2c_mux_cntl = true;
+		atpx->functions.disp_mux_cntl = true;
+	}
+	/* if any outputs are muxed, mux controls are required */
+	if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+			  ATPX_TV_SIGNAL_MUXED |
+			  ATPX_DFP_SIGNAL_MUXED))
+		atpx->functions.disp_mux_cntl = true;
+
+	/* some BIOSes set these bits rather than flagging power_cntl as supported */
+	if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+			  ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+		atpx->functions.power_cntl = true;
+
+	atpx->is_hybrid = false;
+	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+		printk("ATPX Hybrid Graphics\n");
+		/*
+		 * Disable legacy PM methods only when pcie port PM is usable,
+		 * otherwise the device might fail to power off or power on.
+		 */
+		atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
+		atpx->is_hybrid = true;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_atpx_verify_interface - verify ATPX
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+{
+	union acpi_object *info;
+	struct atpx_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		printk("ATPX buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	printk("ATPX version %u, functions 0x%08x\n",
+	       output.version, output.function_bits);
+
+	radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_power_control input;
+
+	if (atpx->functions.power_cntl) {
+		input.size = 3;
+		input.dgpu_state = state;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_POWER_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+
+		/* 200ms delay is required after off */
+		if (state == 0)
+			msleep(200);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.disp_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.i2c_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_I2C_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_start) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_end) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+	u16 gpu_id;
+
+	if (id == VGA_SWITCHEROO_IGD)
+		gpu_id = ATPX_INTEGRATED_GPU;
+	else
+		gpu_id = ATPX_DISCRETE_GPU;
+
+	radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
+
+	return 0;
+}
+
+/**
+ * radeon_atpx_power_state - power down/up the requested GPU
+ *
+ * @id: GPU to power down/up
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+				   enum vga_switcheroo_state state)
+{
+	/* on the W500, ACPI can't change the Intel GPU's power state */
+	if (id == VGA_SWITCHEROO_IGD)
+		return 0;
+
+	radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
+	return 0;
+}
+
+/**
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATPX handles (all asics).
+ * Returns true if the handles are found, false if not.
+ */
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, atpx_handle;
+	acpi_status status;
+
+	dhandle = ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	radeon_atpx_priv.dhandle = dhandle;
+	radeon_atpx_priv.atpx.handle = atpx_handle;
+	return true;
+}
+
+/**
+ * radeon_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_init(void)
+{
+	int r;
+
+	/* set up the ATPX handle */
+	r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	/* validate the atpx setup */
+	r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * radeon_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * Look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
+static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+	if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
+		return VGA_SWITCHEROO_IGD;
+	else
+		return VGA_SWITCHEROO_DIS;
+}
+
+static const struct vga_switcheroo_handler radeon_atpx_handler = {
+	.switchto = radeon_atpx_switchto,
+	.power_state = radeon_atpx_power_state,
+	.get_client_id = radeon_atpx_get_client_id,
+};
+
+/**
+ * radeon_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
+static bool radeon_atpx_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_atpx = false;
+	int vga_count = 0;
+	bool d3_supported = false;
+	struct pci_dev *parent_pdev;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+	}
+
+	/* some newer PX laptops mark the dGPU as a non-VGA display device */
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+	}
+
+	if (has_atpx && vga_count == 2) {
+		acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
+		pr_info("vga_switcheroo: detected switching method %s handle\n",
+			acpi_method_name);
+		radeon_atpx_priv.atpx_detected = true;
+		radeon_atpx_priv.bridge_pm_usable = d3_supported;
+		radeon_atpx_init();
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_register_atpx_handler(void)
+{
+	bool r;
+	enum vga_switcheroo_handler_flags_t handler_flags = 0;
+
+	/* detect if we have any ATPX + 2 VGA in the system */
+	r = radeon_atpx_detect();
+	if (!r)
+		return;
+
+	vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags);
+}
+
+/**
+ * radeon_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_unregister_atpx_handler(void)
+{
+	vga_switcheroo_unregister_handler();
+}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
new file mode 100644
index 0000000..770e31f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -0,0 +1,789 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Slava Grigorev <slava.grigorev@amd.com>
+ */
+
+#include <linux/gcd.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "radeon.h"
+#include "atom.h"
+#include "radeon_audio.h"
+
+void r600_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+		u8 enable_mask);
+void dce4_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+		u8 enable_mask);
+void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
+		u8 enable_mask);
+u32 dce6_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg);
+void dce6_endpoint_wreg(struct radeon_device *rdev,
+		u32 offset, u32 reg, u32 v);
+void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder,
+		struct cea_sad *sads, int sad_count);
+void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
+		struct cea_sad *sads, int sad_count);
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+		struct cea_sad *sads, int sad_count);
+void dce3_2_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce3_2_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+		struct drm_connector *connector, struct drm_display_mode *mode);
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+		struct drm_connector *connector, struct drm_display_mode *mode);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void dce6_afmt_select_pin(struct drm_encoder *encoder);
+void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void dce3_2_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+	struct radeon_crtc *crtc, unsigned int clock);
+void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
+	unsigned char *buffer, size_t size);
+void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
+	unsigned char *buffer, size_t size);
+void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr);
+void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr);
+void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+	const struct radeon_hdmi_acr *acr);
+void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_hdmi_set_color_depth(struct drm_encoder *encoder,
+	u32 offset, int bpc);
+void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void dce3_2_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset);
+void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+void dce3_2_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute);
+static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+	struct drm_display_mode *mode);
+static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+	struct drm_display_mode *mode);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
+
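+/* register offsets of the seven DCE6+ audio endpoint blocks, relative
+ * to the first block at 0x5e00; the blocks are 0x18 bytes apart
+ */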
+static const u32 pin_offsets[7] =
+{
+	(0x5e00 - 0x5e00),
+	(0x5e18 - 0x5e00),
+	(0x5e30 - 0x5e00),
+	(0x5e48 - 0x5e00),
+	(0x5e60 - 0x5e00),
+	(0x5e78 - 0x5e00),
+	(0x5e90 - 0x5e00),
+};
+
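+/* pre-DCE6 parts have a single flat audio register space, so the
+ * generic accessors ignore the endpoint offset; DCE6 uses the indexed
+ * endpoint registers via dce6_endpoint_rreg/wreg instead
+ */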
+static u32 radeon_audio_rreg(struct radeon_device *rdev, u32 offset, u32 reg)
+{
+	return RREG32(reg);
+}
+
+static void radeon_audio_wreg(struct radeon_device *rdev, u32 offset,
+		u32 reg, u32 v)
+{
+	WREG32(reg, v);
+}
+
+static struct radeon_audio_basic_funcs r600_funcs = {
+	.endpoint_rreg = radeon_audio_rreg,
+	.endpoint_wreg = radeon_audio_wreg,
+	.enable = r600_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce32_funcs = {
+	.endpoint_rreg = radeon_audio_rreg,
+	.endpoint_wreg = radeon_audio_wreg,
+	.enable = r600_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce4_funcs = {
+	.endpoint_rreg = radeon_audio_rreg,
+	.endpoint_wreg = radeon_audio_wreg,
+	.enable = dce4_audio_enable,
+};
+
+static struct radeon_audio_basic_funcs dce6_funcs = {
+	.endpoint_rreg = dce6_endpoint_rreg,
+	.endpoint_wreg = dce6_endpoint_wreg,
+	.enable = dce6_audio_enable,
+};
+
+static struct radeon_audio_funcs r600_hdmi_funcs = {
+	.get_pin = r600_audio_get_pin,
+	.set_dto = r600_hdmi_audio_set_dto,
+	.update_acr = r600_hdmi_update_acr,
+	.set_vbi_packet = r600_set_vbi_packet,
+	.set_avi_packet = r600_set_avi_packet,
+	.set_audio_packet = r600_set_audio_packet,
+	.set_mute = r600_set_mute,
+	.mode_set = radeon_audio_hdmi_mode_set,
+	.dpms = r600_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce32_hdmi_funcs = {
+	.get_pin = r600_audio_get_pin,
+	.write_sad_regs = dce3_2_afmt_write_sad_regs,
+	.write_speaker_allocation = dce3_2_afmt_hdmi_write_speaker_allocation,
+	.set_dto = dce3_2_audio_set_dto,
+	.update_acr = dce3_2_hdmi_update_acr,
+	.set_vbi_packet = r600_set_vbi_packet,
+	.set_avi_packet = r600_set_avi_packet,
+	.set_audio_packet = dce3_2_set_audio_packet,
+	.set_mute = dce3_2_set_mute,
+	.mode_set = radeon_audio_hdmi_mode_set,
+	.dpms = r600_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce32_dp_funcs = {
+	.get_pin = r600_audio_get_pin,
+	.write_sad_regs = dce3_2_afmt_write_sad_regs,
+	.write_speaker_allocation = dce3_2_afmt_dp_write_speaker_allocation,
+	.set_dto = dce3_2_audio_set_dto,
+	.set_avi_packet = r600_set_avi_packet,
+	.set_audio_packet = dce3_2_set_audio_packet,
+};
+
+static struct radeon_audio_funcs dce4_hdmi_funcs = {
+	.get_pin = r600_audio_get_pin,
+	.write_sad_regs = evergreen_hdmi_write_sad_regs,
+	.write_speaker_allocation = dce4_afmt_hdmi_write_speaker_allocation,
+	.write_latency_fields = dce4_afmt_write_latency_fields,
+	.set_dto = dce4_hdmi_audio_set_dto,
+	.update_acr = evergreen_hdmi_update_acr,
+	.set_vbi_packet = dce4_set_vbi_packet,
+	.set_color_depth = dce4_hdmi_set_color_depth,
+	.set_avi_packet = evergreen_set_avi_packet,
+	.set_audio_packet = dce4_set_audio_packet,
+	.set_mute = dce4_set_mute,
+	.mode_set = radeon_audio_hdmi_mode_set,
+	.dpms = evergreen_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce4_dp_funcs = {
+	.get_pin = r600_audio_get_pin,
+	.write_sad_regs = evergreen_hdmi_write_sad_regs,
+	.write_speaker_allocation = dce4_afmt_dp_write_speaker_allocation,
+	.write_latency_fields = dce4_afmt_write_latency_fields,
+	.set_dto = dce4_dp_audio_set_dto,
+	.set_avi_packet = evergreen_set_avi_packet,
+	.set_audio_packet = dce4_set_audio_packet,
+	.mode_set = radeon_audio_dp_mode_set,
+	.dpms = evergreen_dp_enable,
+};
+
+static struct radeon_audio_funcs dce6_hdmi_funcs = {
+	.select_pin = dce6_afmt_select_pin,
+	.get_pin = dce6_audio_get_pin,
+	.write_sad_regs = dce6_afmt_write_sad_regs,
+	.write_speaker_allocation = dce6_afmt_hdmi_write_speaker_allocation,
+	.write_latency_fields = dce6_afmt_write_latency_fields,
+	.set_dto = dce6_hdmi_audio_set_dto,
+	.update_acr = evergreen_hdmi_update_acr,
+	.set_vbi_packet = dce4_set_vbi_packet,
+	.set_color_depth = dce4_hdmi_set_color_depth,
+	.set_avi_packet = evergreen_set_avi_packet,
+	.set_audio_packet = dce4_set_audio_packet,
+	.set_mute = dce4_set_mute,
+	.mode_set = radeon_audio_hdmi_mode_set,
+	.dpms = evergreen_hdmi_enable,
+};
+
+static struct radeon_audio_funcs dce6_dp_funcs = {
+	.select_pin = dce6_afmt_select_pin,
+	.get_pin = dce6_audio_get_pin,
+	.write_sad_regs = dce6_afmt_write_sad_regs,
+	.write_speaker_allocation = dce6_afmt_dp_write_speaker_allocation,
+	.write_latency_fields = dce6_afmt_write_latency_fields,
+	.set_dto = dce6_dp_audio_set_dto,
+	.set_avi_packet = evergreen_set_avi_packet,
+	.set_audio_packet = dce4_set_audio_packet,
+	.mode_set = radeon_audio_dp_mode_set,
+	.dpms = evergreen_dp_enable,
+};
+
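+/* an audio pin can be shared by several digital encoders; count the
+ * encoders still using this pin and refuse a disable request
+ * (enable_mask == 0) while more than one user remains
+ */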
+static void radeon_audio_enable(struct radeon_device *rdev,
+				struct r600_audio_pin *pin, u8 enable_mask)
+{
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	int pin_count = 0;
+
+	if (!pin)
+		return;
+
+	if (rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+			if (radeon_encoder_is_digital(encoder)) {
+				radeon_encoder = to_radeon_encoder(encoder);
+				dig = radeon_encoder->enc_priv;
+				if (dig->pin == pin)
+					pin_count++;
+			}
+		}
+
+		if ((pin_count > 1) && (enable_mask == 0))
+			return;
+	}
+
+	if (rdev->audio.funcs->enable)
+		rdev->audio.funcs->enable(rdev, pin, enable_mask);
+}
+
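+/* the ASIC_IS_DCE* tests are cumulative (a DCE6 part also passes the
+ * DCE4 and DCE3.2 tests), so match the newest generation first
+ */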
+static void radeon_audio_interface_init(struct radeon_device *rdev)
+{
+	if (ASIC_IS_DCE6(rdev)) {
+		rdev->audio.funcs = &dce6_funcs;
+		rdev->audio.hdmi_funcs = &dce6_hdmi_funcs;
+		rdev->audio.dp_funcs = &dce6_dp_funcs;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		rdev->audio.funcs = &dce4_funcs;
+		rdev->audio.hdmi_funcs = &dce4_hdmi_funcs;
+		rdev->audio.dp_funcs = &dce4_dp_funcs;
+	} else if (ASIC_IS_DCE32(rdev)) {
+		rdev->audio.funcs = &dce32_funcs;
+		rdev->audio.hdmi_funcs = &dce32_hdmi_funcs;
+		rdev->audio.dp_funcs = &dce32_dp_funcs;
+	} else {
+		rdev->audio.funcs = &r600_funcs;
+		rdev->audio.hdmi_funcs = &r600_hdmi_funcs;
+		rdev->audio.dp_funcs = NULL;
+	}
+}
+
+static int radeon_audio_chipset_supported(struct radeon_device *rdev)
+{
+	return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
+}
+
+int radeon_audio_init(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!radeon_audio || !radeon_audio_chipset_supported(rdev))
+		return 0;
+
+	rdev->audio.enabled = true;
+
+	if (ASIC_IS_DCE83(rdev))		/* KB: 2 streams, 3 endpoints */
+		rdev->audio.num_pins = 3;
+	else if (ASIC_IS_DCE81(rdev))	/* KV: 4 streams, 7 endpoints */
+		rdev->audio.num_pins = 7;
+	else if (ASIC_IS_DCE8(rdev))	/* BN/HW: 6 streams, 7 endpoints */
+		rdev->audio.num_pins = 7;
+	else if (ASIC_IS_DCE64(rdev))	/* OL: 2 streams, 2 endpoints */
+		rdev->audio.num_pins = 2;
+	else if (ASIC_IS_DCE61(rdev))	/* TN: 4 streams, 6 endpoints */
+		rdev->audio.num_pins = 6;
+	else if (ASIC_IS_DCE6(rdev))	/* SI: 6 streams, 6 endpoints */
+		rdev->audio.num_pins = 6;
+	else
+		rdev->audio.num_pins = 1;
+
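+	/* -1 marks channels/rate/bits_per_sample as "not yet probed" */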
+	for (i = 0; i < rdev->audio.num_pins; i++) {
+		rdev->audio.pin[i].channels = -1;
+		rdev->audio.pin[i].rate = -1;
+		rdev->audio.pin[i].bits_per_sample = -1;
+		rdev->audio.pin[i].status_bits = 0;
+		rdev->audio.pin[i].category_code = 0;
+		rdev->audio.pin[i].connected = false;
+		rdev->audio.pin[i].offset = pin_offsets[i];
+		rdev->audio.pin[i].id = i;
+	}
+
+	radeon_audio_interface_init(rdev);
+
+	/* disable audio.  it will be set up later */
+	for (i = 0; i < rdev->audio.num_pins; i++)
+		radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+
+	return 0;
+}
+
+u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg)
+{
+	if (rdev->audio.funcs->endpoint_rreg)
+		return rdev->audio.funcs->endpoint_rreg(rdev, offset, reg);
+
+	return 0;
+}
+
+void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
+	u32 reg, u32 v)
+{
+	if (rdev->audio.funcs->endpoint_wreg)
+		rdev->audio.funcs->endpoint_wreg(rdev, offset, reg, v);
+}
+
+static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct cea_sad *sads;
+	int sad_count;
+
+	if (!connector)
+		return;
+
+	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
+	if (sad_count < 0)
+		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+	if (sad_count <= 0)
+		return;
+	BUG_ON(!sads);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
+		radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
+
+	kfree(sads);
+}
+
+static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	u8 *sadb = NULL;
+	int sad_count;
+
+	if (!connector)
+		return;
+
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+						   &sadb);
+	if (sad_count < 0) {
+		DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
+			  sad_count);
+		sad_count = 0;
+	}
+
+	if (radeon_encoder->audio && radeon_encoder->audio->write_speaker_allocation)
+		radeon_encoder->audio->write_speaker_allocation(encoder, sadb, sad_count);
+
+	kfree(sadb);
+}
+
+static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
+					      struct drm_display_mode *mode)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (!connector)
+		return;
+
+	if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
+		radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
+}
+
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->get_pin)
+		return radeon_encoder->audio->get_pin(rdev);
+
+	return NULL;
+}
+
+static void radeon_audio_select_pin(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->select_pin)
+		radeon_encoder->audio->select_pin(encoder);
+}
+
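+/* called from connector detect: pick the DP or HDMI audio functions
+ * (a DP connector with a non-DP sink, e.g. a passive HDMI adapter,
+ * still takes the HDMI paths) and claim or release an audio pin
+ */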
+void radeon_audio_detect(struct drm_connector *connector,
+			 struct drm_encoder *encoder,
+			 enum drm_connector_status status)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_audio_chipset_supported(rdev))
+		return;
+
+	if (!radeon_encoder_is_digital(encoder))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+
+	if (status == connector_status_connected) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+			if (radeon_dp_getsinktype(radeon_connector) ==
+			    CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				radeon_encoder->audio = rdev->audio.dp_funcs;
+			else
+				radeon_encoder->audio = rdev->audio.hdmi_funcs;
+		} else {
+			radeon_encoder->audio = rdev->audio.hdmi_funcs;
+		}
+
+		if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+			if (!dig->pin)
+				dig->pin = radeon_audio_get_pin(encoder);
+			radeon_audio_enable(rdev, dig->pin, 0xf);
+		} else {
+			radeon_audio_enable(rdev, dig->pin, 0);
+			dig->pin = NULL;
+		}
+	} else {
+		radeon_audio_enable(rdev, dig->pin, 0);
+		dig->pin = NULL;
+	}
+}
+
+void radeon_audio_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->audio.enabled)
+		return;
+
+	for (i = 0; i < rdev->audio.num_pins; i++)
+		radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+
+	rdev->audio.enabled = false;
+}
+
+static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *crtc = to_radeon_crtc(encoder->crtc);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->set_dto)
+		radeon_encoder->audio->set_dto(rdev, crtc, clock);
+}
+
+static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
+	int err;
+
+	if (!connector)
+		return -EINVAL;
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+		if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+			if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+			else
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+		} else {
+			frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+		}
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	if (dig && dig->afmt && radeon_encoder->audio &&
+	    radeon_encoder->audio->set_avi_packet)
+		radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
+			buffer, sizeof(buffer));
+
+	return 0;
+}
+
+/*
+ * calculate CTS and N values if they are not found in the table
+ */
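+/*
+ * Worked example (illustration only): for a 74.25 MHz pixel clock and
+ * 48 kHz audio, n = 128 * 48000 = 6144000 and cts = 74250000;
+ * gcd(n, cts) = 6000 reduces this to 1024/12375.  The optimal N is
+ * 128 * 48000 / 1000 = 6144, so mul = (6144 + 1023) / 1024 = 6,
+ * giving N = 6144 and CTS = 74250 -- the same pair the predefined
+ * table in radeon_audio_acr() below lists for a 74250 kHz clock.
+ */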
+static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq)
+{
+	int n, cts;
+	unsigned long div, mul;
+
+	/* Safe, but overly large values */
+	n = 128 * freq;
+	cts = clock * 1000;
+
+	/* Smallest valid fraction */
+	div = gcd(n, cts);
+
+	n /= div;
+	cts /= div;
+
+	/*
+	 * The optimal N is 128*freq/1000. Calculate the closest larger
+	 * value that doesn't truncate any bits.
+	 */
+	mul = ((128*freq/1000) + (n-1))/n;
+
+	n *= mul;
+	cts *= mul;
+
+	/* Check that we are in spec (not always possible) */
+	if (n < (128*freq/1500))
+		pr_warn("Calculated ACR N value is too small. You may experience audio problems.\n");
+	if (n > (128*freq/300))
+		pr_warn("Calculated ACR N value is too large. You may experience audio problems.\n");
+
+	*N = n;
+	*CTS = cts;
+
+	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+		*N, *CTS, freq);
+}
+
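+/* look up N/CTS for the common TMDS clocks; anything else falls back to
+ * radeon_audio_calc_cts().  Note that @res is function-static, so the
+ * returned pointer is only valid until the next call
+ */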
+static const struct radeon_hdmi_acr *radeon_audio_acr(unsigned int clock)
+{
+	static struct radeon_hdmi_acr res;
+	u8 i;
+
+	static const struct radeon_hdmi_acr hdmi_predefined_acr[] = {
+		/*       32kHz    44.1kHz   48kHz    */
+		/* Clock      N     CTS      N     CTS      N     CTS */
+		{  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25,20/1.001 MHz */
+		{  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
+		{  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
+		{  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
+		{  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
+		{  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
+		{  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
+		{  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
+		{ 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
+		{ 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
+	};
+
+	/* Precalculated values for common clocks */
+	for (i = 0; i < ARRAY_SIZE(hdmi_predefined_acr); i++)
+		if (hdmi_predefined_acr[i].clock == clock)
+			return &hdmi_predefined_acr[i];
+
+	/* And odd clocks get manually calculated */
+	radeon_audio_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+	radeon_audio_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+	radeon_audio_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+	return &res;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void radeon_audio_update_acr(struct drm_encoder *encoder, unsigned int clock)
+{
+	const struct radeon_hdmi_acr *acr = radeon_audio_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (radeon_encoder->audio && radeon_encoder->audio->update_acr)
+		radeon_encoder->audio->update_acr(encoder, dig->afmt->offset, acr);
+}
+
+static void radeon_audio_set_vbi_packet(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (radeon_encoder->audio && radeon_encoder->audio->set_vbi_packet)
+		radeon_encoder->audio->set_vbi_packet(encoder, dig->afmt->offset);
+}
+
+static void radeon_hdmi_set_color_depth(struct drm_encoder *encoder)
+{
+	int bpc = 8;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		bpc = radeon_crtc->bpc;
+	}
+
+	if (radeon_encoder->audio && radeon_encoder->audio->set_color_depth)
+		radeon_encoder->audio->set_color_depth(encoder, dig->afmt->offset, bpc);
+}
+
+static void radeon_audio_set_audio_packet(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (radeon_encoder->audio && radeon_encoder->audio->set_audio_packet)
+		radeon_encoder->audio->set_audio_packet(encoder, dig->afmt->offset);
+}
+
+static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (radeon_encoder->audio && radeon_encoder->audio->set_mute)
+		radeon_encoder->audio->set_mute(encoder, dig->afmt->offset, mute);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (!connector)
+		return;
+
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_set_mute(encoder, true);
+
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		radeon_audio_set_dto(encoder, mode->clock);
+		radeon_audio_set_vbi_packet(encoder);
+		radeon_hdmi_set_color_depth(encoder);
+		radeon_audio_update_acr(encoder, mode->clock);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+
+		radeon_audio_set_mute(encoder, false);
+	} else {
+		radeon_hdmi_set_color_depth(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
+}
+
+static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if (!dig || !dig->afmt)
+		return;
+
+	if (!connector)
+		return;
+
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
+}
+
+void radeon_audio_mode_set(struct drm_encoder *encoder,
+			   struct drm_display_mode *mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->mode_set)
+		radeon_encoder->audio->mode_set(encoder, mode);
+}
+
+void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+		radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
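+/* decode the hardware DFS divider field into the divider scaled by 100
+ * (e.g. div == 8 decodes to 200, i.e. a divider of 2.00); the three
+ * ranges step by 0.25, 0.50 and 1.00 respectively, anything else is 0
+ */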
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+	if (div >= 8 && div < 64)
+		return (div - 8) * 25 + 200;
+	else if (div >= 64 && div < 96)
+		return (div - 64) * 50 + 1600;
+	else if (div >= 96 && div < 128)
+		return (div - 96) * 100 + 3200;
+	else
+		return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
new file mode 100644
index 0000000..5c70cce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Slava Grigorev <slava.grigorev@amd.com>
+ */
+
+#ifndef __RADEON_AUDIO_H__
+#define __RADEON_AUDIO_H__
+
+#include <linux/types.h>
+
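+/* endpoint register accessors; like most radeon register helpers, these
+ * macros expect a local variable named rdev to be in scope at the call
+ * site
+ */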
+#define RREG32_ENDPOINT(block, reg)		\
+	radeon_audio_endpoint_rreg(rdev, (block), (reg))
+#define WREG32_ENDPOINT(block, reg, v)	\
+	radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
+
+struct radeon_audio_basic_funcs
+{
+	u32  (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg);
+	void (*endpoint_wreg)(struct radeon_device *rdev,
+		u32 offset, u32 reg, u32 v);
+	void (*enable)(struct radeon_device *rdev,
+		struct r600_audio_pin *pin, u8 enable_mask);
+};
+
+struct radeon_audio_funcs
+{
+	void (*select_pin)(struct drm_encoder *encoder);
+	struct r600_audio_pin *(*get_pin)(struct radeon_device *rdev);
+	void (*write_latency_fields)(struct drm_encoder *encoder,
+		struct drm_connector *connector, struct drm_display_mode *mode);
+	void (*write_sad_regs)(struct drm_encoder *encoder,
+		struct cea_sad *sads, int sad_count);
+	void (*write_speaker_allocation)(struct drm_encoder *encoder,
+		u8 *sadb, int sad_count);
+	void (*set_dto)(struct radeon_device *rdev,
+		struct radeon_crtc *crtc, unsigned int clock);
+	void (*update_acr)(struct drm_encoder *encoder, long offset,
+		const struct radeon_hdmi_acr *acr);
+	void (*set_vbi_packet)(struct drm_encoder *encoder, u32 offset);
+	void (*set_color_depth)(struct drm_encoder *encoder, u32 offset, int bpc);
+	void (*set_avi_packet)(struct radeon_device *rdev, u32 offset,
+		unsigned char *buffer, size_t size);
+	void (*set_audio_packet)(struct drm_encoder *encoder, u32 offset);
+	void (*set_mute)(struct drm_encoder *encoder, u32 offset, bool mute);
+	void (*mode_set)(struct drm_encoder *encoder,
+		struct drm_display_mode *mode);
+	void (*dpms)(struct drm_encoder *encoder, bool mode);
+};
+
+int radeon_audio_init(struct radeon_device *rdev);
+void radeon_audio_detect(struct drm_connector *connector,
+			 struct drm_encoder *encoder,
+			 enum drm_connector_status status);
+u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
+	u32 offset, u32 reg);
+void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
+	u32 offset, u32 reg, u32 v);
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
+void radeon_audio_fini(struct radeon_device *rdev);
+void radeon_audio_mode_set(struct drm_encoder *encoder,
+	struct drm_display_mode *mode);
+void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 0000000..87d5fb2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_BENCHMARK_COPY_BLIT 1
+#define RADEON_BENCHMARK_COPY_DMA  0
+
+#define RADEON_BENCHMARK_ITERATIONS 1024
+#define RADEON_BENCHMARK_COMMON_MODES_N 17
+
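+/* perform @n copies of @size bytes and return the elapsed wall time in
+ * milliseconds, or a negative error code on failure
+ */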
+static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+				    uint64_t saddr, uint64_t daddr,
+				    int flag, int n,
+				    struct reservation_object *resv)
+{
+	unsigned long start_jiffies;
+	unsigned long end_jiffies;
+	struct radeon_fence *fence = NULL;
+	int i, r;
+
+	start_jiffies = jiffies;
+	for (i = 0; i < n; i++) {
+		switch (flag) {
+		case RADEON_BENCHMARK_COPY_DMA:
+			fence = radeon_copy_dma(rdev, saddr, daddr,
+						size / RADEON_GPU_PAGE_SIZE,
+						resv);
+			break;
+		case RADEON_BENCHMARK_COPY_BLIT:
+			fence = radeon_copy_blit(rdev, saddr, daddr,
+						 size / RADEON_GPU_PAGE_SIZE,
+						 resv);
+			break;
+		default:
+			DRM_ERROR("Unknown copy method\n");
+			return -EINVAL;
+		}
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+
+		r = radeon_fence_wait(fence, false);
+		radeon_fence_unref(&fence);
+		if (r)
+			return r;
+	}
+	end_jiffies = jiffies;
+	return jiffies_to_msecs(end_jiffies - start_jiffies);
+}
+
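+/* throughput comes out in (approximately) MB/s: iterations * KiB / ms;
+ * the Mb/s figure is simply eight times that
+ */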
+static void radeon_benchmark_log_results(int n, unsigned size,
+					 unsigned int time,
+					 unsigned sdomain, unsigned ddomain,
+					 char *kind)
+{
+	unsigned int throughput = (n * (size >> 10)) / time;
+	DRM_INFO("radeon: %s %u bo moves of %u kB from"
+		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
+		 kind, n, size >> 10, sdomain, ddomain, time,
+		 throughput * 8, throughput);
+}
+
+static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+				  unsigned sdomain, unsigned ddomain)
+{
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
+	uint64_t saddr, daddr;
+	int r, n;
+	int time;
+
+	n = RADEON_BENCHMARK_ITERATIONS;
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+
+	if (rdev->asic->copy.dma) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_DMA, n,
+						dobj->tbo.resv);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "dma");
+	}
+
+	if (rdev->asic->copy.blit) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_BLIT, n,
+						dobj->tbo.resv);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "blit");
+	}
+
+out_cleanup:
+	if (sobj) {
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
+	}
+	if (dobj) {
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
+	}
+
+	if (r) {
+		DRM_ERROR("Error while benchmarking BO move.\n");
+	}
+}
+
+void radeon_benchmark(struct radeon_device *rdev, int test_number)
+{
+	int i;
+	int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
+		640 * 480 * 4,
+		720 * 480 * 4,
+		800 * 600 * 4,
+		848 * 480 * 4,
+		1024 * 768 * 4,
+		1152 * 768 * 4,
+		1280 * 720 * 4,
+		1280 * 800 * 4,
+		1280 * 854 * 4,
+		1280 * 960 * 4,
+		1280 * 1024 * 4,
+		1440 * 900 * 4,
+		1400 * 1050 * 4,
+		1680 * 1050 * 4,
+		1600 * 1200 * 4,
+		1920 * 1080 * 4,
+		1920 * 1200 * 4
+	};
+
+	switch (test_number) {
+	case 1:
+		/* simple test, VRAM to GTT and GTT to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+				      RADEON_GEM_DOMAIN_VRAM);
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 2:
+		/* simple test, VRAM to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 3:
+		/* GTT to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 4:
+		/* VRAM to GTT, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 5:
+		/* VRAM to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 6:
+		/* GTT to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 7:
+		/* VRAM to GTT, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 8:
+		/* VRAM to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+
+	default:
+		DRM_ERROR("Unknown benchmark\n");
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 0000000..04c0ed4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#include <linux/slab.h>
+#include <linux/acpi.h>
+/*
+ * BIOS.
+ */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar because it is
+ * part of the system bios.  On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
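+/* every path below validates the image against the standard PC
+ * expansion ROM signature, 0x55 0xaa in the first two bytes
+ */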
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios;
+	resource_size_t vram_base;
+	resource_size_t size = 256 * 1024; /* ??? */
+
+	if (!(rdev->flags & RADEON_IS_IGP))
+		if (!radeon_card_posted(rdev))
+			return false;
+
+	rdev->bios = NULL;
+	vram_base = pci_resource_start(rdev->pdev, 0);
+	bios = ioremap(vram_base, size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		iounmap(bios);
+		return false;
+	}
+	rdev->bios = kmalloc(size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		iounmap(bios);
+		return false;
+	}
+	memcpy_fromio(rdev->bios, bios, size);
+	iounmap(bios);
+	return true;
+}
+
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios, val1, val2;
+	size_t size;
+
+	rdev->bios = NULL;
+	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
+	bios = pci_map_rom(rdev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	val1 = readb(&bios[0]);
+	val2 = readb(&bios[1]);
+
+	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+		pci_unmap_rom(rdev->pdev, bios);
+		return false;
+	}
+	rdev->bios = kzalloc(size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		pci_unmap_rom(rdev->pdev, bios);
+		return false;
+	}
+	memcpy_fromio(rdev->bios, bios, size);
+	pci_unmap_rom(rdev->pdev, bios);
+	return true;
+}
+
+static bool radeon_read_platform_bios(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios;
+	size_t size;
+
+	rdev->bios = NULL;
+
+	bios = pci_platform_rom(rdev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		return false;
+	}
+	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		return false;
+	}
+
+	return true;
+}
+
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object atrm_arg_elements[2], *obj;
+	struct acpi_object_list atrm_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.count = 2;
+	atrm_arg.pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].integer.value = offset;
+
+	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		pr_err("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+	len = obj->buffer.length;
+	kfree(buffer.pointer);
+	return len;
+}
+
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	int ret;
+	int size = 256 * 1024;
+	int i;
+	struct pci_dev *pdev = NULL;
+	acpi_handle dhandle, atrm_handle;
+	acpi_status status;
+	bool found = false;
+
+	/* ATRM is for the discrete card only */
+	if (rdev->flags & RADEON_IS_IGP)
+		return false;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		dhandle = ACPI_HANDLE(&pdev->dev);
+		if (!dhandle)
+			continue;
+
+		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found)
+		return false;
+
+	rdev->bios = kmalloc(size, GFP_KERNEL);
+	if (!rdev->bios) {
+		DRM_ERROR("Unable to allocate bios\n");
+		return false;
+	}
+
+	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+		ret = radeon_atrm_call(atrm_handle,
+				       rdev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
+		if (ret < ATRM_BIOS_PAGE)
+			break;
+	}
+
+	if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		kfree(rdev->bios);
+		rdev->bios = NULL;
+		return false;
+	}
+	return true;
+}
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* Disable VGA mode */
+		WREG32(AVIVO_D1VGA_CONTROL,
+		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_D2VGA_CONTROL,
+		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_VGA_RENDER_CONTROL,
+		       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+		WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t cg_spll_func_cntl = 0;
+	uint32_t cg_spll_status;
+	bool r;
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	if (rdev->family == CHIP_RV730) {
+		cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+		/* enable bypass mode */
+		WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+						R600_SPLL_BYPASS_EN));
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+		WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+	} else
+		WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	if (rdev->family == CHIP_RV730) {
+		WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+	}
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t general_pwrmgt;
+	uint32_t low_vid_lower_gpio_cntl;
+	uint32_t medium_vid_lower_gpio_cntl;
+	uint32_t high_vid_lower_gpio_cntl;
+	uint32_t ctxsw_vid_lower_gpio_cntl;
+	uint32_t lower_gpio_enable;
+	bool r;
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+	general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+	low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+	medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+	high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+	ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+	lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	WREG32(R600_ROM_CNTL,
+	       ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+		(1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+		R600_SCK_OVERWRITE));
+
+	WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+	       (low_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+	       (medium_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+	       (high_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+	       (ctxsw_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+	WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+	return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t gpiopad_a;
+	uint32_t gpiopad_en;
+	uint32_t gpiopad_mask;
+	bool r;
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(RV370_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+	gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+	WREG32(RADEON_GPIOPAD_A, 0);
+	WREG32(RADEON_GPIOPAD_EN, 0);
+	WREG32(RADEON_GPIOPAD_MASK, 0);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(RV370_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+	WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+	WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+	return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t crtc_gen_cntl;
+	uint32_t crtc2_gen_cntl;
+	uint32_t crtc_ext_cntl;
+	uint32_t fp2_gen_cntl;
+	bool r;
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	if (rdev->flags & RADEON_IS_PCIE)
+		bus_cntl = RREG32(RV370_BUS_CNTL);
+	else
+		bus_cntl = RREG32(RADEON_BUS_CNTL);
+	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+	crtc2_gen_cntl = 0;
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	fp2_gen_cntl = 0;
+
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	}
+
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+	else
+		WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+	/* Turn off mem requests and CRTC for both controllers */
+	WREG32(RADEON_CRTC_GEN_CNTL,
+	       ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+		(RADEON_CRTC_DISP_REQ_EN_B |
+		 RADEON_CRTC_EXT_DISP_EN)));
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL,
+		       ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+			RADEON_CRTC2_DISP_REQ_EN_B));
+	}
+	/* Turn off CRTC */
+	WREG32(RADEON_CRTC_EXT_CNTL,
+	       ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+		(RADEON_CRTC_SYNC_TRISTAT |
+		 RADEON_CRTC_DISPLAY_DIS)));
+
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+	}
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, bus_cntl);
+	else
+		WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	}
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	}
+	return r;
+}
+
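+/* last resort: temporarily reprogram the ROM/VGA registers so the ROM
+ * becomes readable, fetch it, then restore everything.  Dispatch on
+ * family, newest first; IGP parts read the shadow copy from vram instead
+ */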
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+	if (rdev->flags & RADEON_IS_IGP)
+		return igp_read_bios_from_vram(rdev);
+	else if (rdev->family >= CHIP_BARTS)
+		return ni_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RV770)
+		return r700_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_R600)
+		return r600_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RS600)
+		return avivo_read_disabled_bios(rdev);
+	else
+		return legacy_read_disabled_bios(rdev);
+}
+
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	struct acpi_table_header *hdr;
+	acpi_size tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	unsigned offset;
+
+	if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+		return false;
+	tbl_size = hdr->length;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		return false;
+	}
+
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	offset = vfct->VBIOSImageOffset;
+
+	while (offset < tbl_size) {
+		GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
+		VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
+
+		offset += sizeof(VFCT_IMAGE_HEADER);
+		if (offset > tbl_size) {
+			DRM_ERROR("ACPI VFCT image header truncated\n");
+			return false;
+		}
+
+		offset += vhdr->ImageLength;
+		if (offset > tbl_size) {
+			DRM_ERROR("ACPI VFCT image truncated\n");
+			return false;
+		}
+
+		if (vhdr->ImageLength &&
+		    vhdr->PCIBus == rdev->pdev->bus->number &&
+		    vhdr->PCIDevice == PCI_SLOT(rdev->pdev->devfn) &&
+		    vhdr->PCIFunction == PCI_FUNC(rdev->pdev->devfn) &&
+		    vhdr->VendorID == rdev->pdev->vendor &&
+		    vhdr->DeviceID == rdev->pdev->device) {
+			rdev->bios = kmemdup(&vbios->VbiosContent,
+					     vhdr->ImageLength,
+					     GFP_KERNEL);
+
+			if (!rdev->bios)
+				return false;
+			return true;
+		}
+	}
+
+	DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+	return false;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+	bool r;
+	uint16_t tmp;
+
+	r = radeon_atrm_get_bios(rdev);
+	if (r == false)
+		r = radeon_acpi_vfct_bios(rdev);
+	if (r == false)
+		r = igp_read_bios_from_vram(rdev);
+	if (r == false)
+		r = radeon_read_bios(rdev);
+	if (r == false)
+		r = radeon_read_disabled_bios(rdev);
+	if (r == false)
+		r = radeon_read_platform_bios(rdev);
+	if (r == false || rdev->bios == NULL) {
+		DRM_ERROR("Unable to locate a BIOS ROM\n");
+		rdev->bios = NULL;
+		return false;
+	}
+	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+		goto free_bios;
+	}
+
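+	/* word 0x18 of the ROM image points to the PCI Data Structure;
+	 * byte 0x14 of that structure is the code type (0x00 = x86 image)
+	 */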
+	tmp = RBIOS16(0x18);
+	if (RBIOS8(tmp + 0x14) != 0x0) {
+		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
+		goto free_bios;
+	}
+
+	rdev->bios_header_start = RBIOS16(0x48);
+	if (!rdev->bios_header_start) {
+		goto free_bios;
+	}
+	tmp = rdev->bios_header_start + 4;
+	if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+	    !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+		rdev->is_atom_bios = true;
+	} else {
+		rdev->is_atom_bios = false;
+	}
+
+	DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+	return true;
+free_bios:
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+	return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 0000000..e55146c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,913 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/* returns the engine clock in 10 kHz units */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	uint32_t fb_div, ref_div, post_div, sclk;
+
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= spll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	sclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+	if (post_div == 2)
+		sclk >>= 1;
+	else if (post_div == 3)
+		sclk >>= 2;
+	else if (post_div == 4)
+		sclk >>= 3;
+
+	return sclk;
+}
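+
+/* Worked example (illustrative values, not from any specific board): with a
+ * 27 MHz reference (reference_freq = 2700 in 10 kHz units), ref_div = 12 and
+ * a raw SPLL feedback divider of 30, fb_div becomes 30 * 2 * 2700 = 162000,
+ * so sclk = 162000 / 12 = 13500 (135 MHz) before the SCLK_SRC_SEL post
+ * divider is applied.
+ */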
+
+/* returns the memory clock in 10 kHz units */
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint32_t fb_div, ref_div, post_div, mclk;
+
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= mpll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	mclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+	if (post_div == 2)
+		mclk >>= 1;
+	else if (post_div == 3)
+		mclk >>= 2;
+	else if (post_div == 4)
+		mclk >>= 3;
+
+	return mclk;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Read XTAL (ref clock), SCLK and MCLK from the Open Firmware device
+ * tree. Hopefully the ATI OF driver was kind enough to fill these in.
+ */
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct device_node *dp = rdev->pdev->dev.of_node;
+	const u32 *val;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+
+	if (dp == NULL)
+		return false;
+	val = of_get_property(dp, "ATY,RefCLK", NULL);
+	if (!val || !*val) {
+		pr_warn("radeonfb: No ATY,RefCLK property !\n");
+		return false;
+	}
+	p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+	p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+	if (p1pll->reference_div < 2)
+		p1pll->reference_div = 12;
+	p2pll->reference_div = p1pll->reference_div;
+
+	/* These aren't in the device-tree */
+	if (rdev->family >= CHIP_R420) {
+		p1pll->pll_in_min = 100;
+		p1pll->pll_in_max = 1350;
+		p1pll->pll_out_min = 20000;
+		p1pll->pll_out_max = 50000;
+		p2pll->pll_in_min = 100;
+		p2pll->pll_in_max = 1350;
+		p2pll->pll_out_min = 20000;
+		p2pll->pll_out_max = 50000;
+	} else {
+		p1pll->pll_in_min = 40;
+		p1pll->pll_in_max = 500;
+		p1pll->pll_out_min = 12500;
+		p1pll->pll_out_max = 35000;
+		p2pll->pll_in_min = 40;
+		p2pll->pll_in_max = 500;
+		p2pll->pll_out_min = 12500;
+		p2pll->pll_out_max = 35000;
+	}
+	/* not sure what the max should be in all cases */
+	rdev->clock.max_pixel_clock = 35000;
+
+	spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+	spll->reference_div = mpll->reference_div =
+		RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+
+	val = of_get_property(dp, "ATY,SCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_sclk = (*val) / 10;
+	else
+		rdev->clock.default_sclk =
+			radeon_legacy_get_engine_clock(rdev);
+
+	val = of_get_property(dp, "ATY,MCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_mclk = (*val) / 10;
+	else
+		rdev->clock.default_mclk =
+			radeon_legacy_get_memory_clock(rdev);
+
+	DRM_INFO("Using device-tree clock info\n");
+
+	return true;
+}
+#else
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_OF */
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int ret;
+
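+	/* prefer the vbios clock info (ATOM or COMBIOS), then the Open
+	 * Firmware device tree, then fall back to generic per-family
+	 * defaults below
+	 */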
+	if (rdev->is_atom_bios)
+		ret = radeon_atom_get_clock_info(dev);
+	else
+		ret = radeon_combios_get_clock_info(dev);
+	if (!ret)
+		ret = radeon_read_clocks_OF(dev);
+
+	if (ret) {
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
+		if (p2pll->reference_div < 2)
+			p2pll->reference_div = 12;
+		if (rdev->family < CHIP_RS600) {
+			if (spll->reference_div < 2)
+				spll->reference_div =
+					RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+					RADEON_M_SPLL_REF_DIV_MASK;
+		}
+		if (mpll->reference_div < 2)
+			mpll->reference_div = spll->reference_div;
+	} else {
+		if (ASIC_IS_AVIVO(rdev)) {
+			/* TODO FALLBACK */
+		} else {
+			DRM_INFO("Using generic clock info\n");
+
+			/* may need to be per card */
+			rdev->clock.max_pixel_clock = 35000;
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				p1pll->reference_freq = 1432;
+				p2pll->reference_freq = 1432;
+				spll->reference_freq = 1432;
+				mpll->reference_freq = 1432;
+			} else {
+				p1pll->reference_freq = 2700;
+				p2pll->reference_freq = 2700;
+				spll->reference_freq = 2700;
+				mpll->reference_freq = 2700;
+			}
+			p1pll->reference_div =
+			    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+			if (p1pll->reference_div < 2)
+				p1pll->reference_div = 12;
+			p2pll->reference_div = p1pll->reference_div;
+
+			if (rdev->family >= CHIP_R420) {
+				p1pll->pll_in_min = 100;
+				p1pll->pll_in_max = 1350;
+				p1pll->pll_out_min = 20000;
+				p1pll->pll_out_max = 50000;
+				p2pll->pll_in_min = 100;
+				p2pll->pll_in_max = 1350;
+				p2pll->pll_out_min = 20000;
+				p2pll->pll_out_max = 50000;
+			} else {
+				p1pll->pll_in_min = 40;
+				p1pll->pll_in_max = 500;
+				p1pll->pll_out_min = 12500;
+				p1pll->pll_out_max = 35000;
+				p2pll->pll_in_min = 40;
+				p2pll->pll_in_max = 500;
+				p2pll->pll_out_min = 12500;
+				p2pll->pll_out_max = 35000;
+			}
+
+			spll->reference_div =
+			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+			mpll->reference_div = spll->reference_div;
+			rdev->clock.default_sclk =
+			    radeon_legacy_get_engine_clock(rdev);
+			rdev->clock.default_mclk =
+			    radeon_legacy_get_memory_clock(rdev);
+		}
+	}
+
+	/* pixel clocks */
+	if (ASIC_IS_AVIVO(rdev)) {
+		p1pll->min_post_div = 2;
+		p1pll->max_post_div = 0x7f;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 9;
+		p2pll->min_post_div = 2;
+		p2pll->max_post_div = 0x7f;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 9;
+	} else {
+		p1pll->min_post_div = 1;
+		p1pll->max_post_div = 16;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 0;
+		p2pll->min_post_div = 1;
+		p2pll->max_post_div = 12;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 0;
+	}
+
+	/* dcpll is DCE4 only */
+	dcpll->min_post_div = 2;
+	dcpll->max_post_div = 0x7f;
+	dcpll->min_frac_feedback_div = 0;
+	dcpll->max_frac_feedback_div = 9;
+	dcpll->min_ref_div = 2;
+	dcpll->max_ref_div = 0x3ff;
+	dcpll->min_feedback_div = 4;
+	dcpll->max_feedback_div = 0xfff;
+	dcpll->best_vco = 0;
+
+	p1pll->min_ref_div = 2;
+	p1pll->max_ref_div = 0x3ff;
+	p1pll->min_feedback_div = 4;
+	p1pll->max_feedback_div = 0x7ff;
+	p1pll->best_vco = 0;
+
+	p2pll->min_ref_div = 2;
+	p2pll->max_ref_div = 0x3ff;
+	p2pll->min_feedback_div = 4;
+	p2pll->max_feedback_div = 0x7ff;
+	p2pll->best_vco = 0;
+
+	/* system clock */
+	spll->min_post_div = 1;
+	spll->max_post_div = 1;
+	spll->min_ref_div = 2;
+	spll->max_ref_div = 0xff;
+	spll->min_feedback_div = 4;
+	spll->max_feedback_div = 0xff;
+	spll->best_vco = 0;
+
+	/* memory clock */
+	mpll->min_post_div = 1;
+	mpll->max_post_div = 1;
+	mpll->min_ref_div = 2;
+	mpll->max_ref_div = 0xff;
+	mpll->min_feedback_div = 4;
+	mpll->max_feedback_div = 0xff;
+	mpll->best_vco = 0;
+
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
+}
+
+/* req_clock and the returned clock are in 10 kHz units */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+				   uint32_t req_clock,
+				   int *fb_div, int *post_div)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	int ref_div = spll->reference_div;
+
+	if (!ref_div)
+		ref_div =
+		    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+		    RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (req_clock < 15000) {
+		*post_div = 8;
+		req_clock *= 8;
+	} else if (req_clock < 30000) {
+		*post_div = 4;
+		req_clock *= 4;
+	} else if (req_clock < 60000) {
+		*post_div = 2;
+		req_clock *= 2;
+	} else
+		*post_div = 1;
+
+	req_clock *= ref_div;
+	req_clock += spll->reference_freq;
+	req_clock /= (2 * spll->reference_freq);
+
+	*fb_div = req_clock & 0xff;
+
+	req_clock = (req_clock & 0xffff) << 1;
+	req_clock *= spll->reference_freq;
+	req_clock /= ref_div;
+	req_clock /= *post_div;
+
+	return req_clock;
+}
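+
+/* Worked example (illustrative values): requesting 20000 (200 MHz) with
+ * reference_freq = 2700 and ref_div = 12 selects post_div = 4 and scales the
+ * request to 80000; adding reference_freq before dividing by
+ * 2 * reference_freq rounds to the nearest feedback divider,
+ * (80000 * 12 + 2700) / 5400 = 178, so the clock actually programmed is
+ * (178 << 1) * 2700 / 12 / 4 = 20025 (200.25 MHz).
+ */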
+
+/* eng_clock is in 10 kHz units */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+				    uint32_t eng_clock)
+{
+	uint32_t tmp;
+	int fb_div, post_div;
+
+	/* XXX: wait for idle */
+
+	eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp &= ~RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(10);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+	tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+	WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+	/* XXX: verify on different asics */
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_PVG_MASK;
+	if ((eng_clock * post_div) >= 90000)
+		tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+	else
+		tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	switch (post_div) {
+	case 1:
+	default:
+		tmp |= 1;
+		break;
+	case 2:
+		tmp |= 2;
+		break;
+	case 4:
+		tmp |= 3;
+		break;
+	case 8:
+		tmp |= 4;
+		break;
+	}
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(20);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp |= RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	uint32_t tmp;
+
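+	/* enable: drop the SCLK/MCLK FORCE_* overrides so the blocks can
+	 * gate their clocks dynamically; disable: force every clock on
+	 */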
+	if (enable) {
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			if ((RREG32(RADEON_CONFIG_CNTL) &
+			     RADEON_CFG_ATI_REV_ID_MASK) >
+			    RADEON_CFG_ATI_REV_A13) {
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_RB);
+			}
+			tmp &=
+			    ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+			      RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+			      RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+			      RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+			      RADEON_SCLK_FORCE_TDM);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if (ASIC_IS_R300(rdev)) {
+			if ((rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480)) {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				tmp |=
+				    RADEON_SCLK_FORCE_TOP |
+				    RADEON_SCLK_FORCE_VIP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			} else if (rdev->family >= CHIP_RV350) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+					R300_SCLK_GA_MAX_DYN_STOP_LAT |
+					R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_MISC);
+				tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+					RADEON_IO_MCLK_DYN_ENABLE);
+				WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp |= (RADEON_FORCEON_MCLKA |
+					RADEON_FORCEON_MCLKB);
+
+				tmp &= ~(RADEON_FORCEON_YCLKA |
+					 RADEON_FORCEON_YCLKB |
+					 RADEON_FORCEON_MC);
+
+				/* Some releases of the vbios set the
+				 * DISABLE_MC_MCLKA and DISABLE_MC_MCLKB bits
+				 * in the vbios table; leaving them set causes
+				 * a hardware hang when reading video memory
+				 * with dynamic clocking enabled.
+				 */
+				if ((tmp & R300_DISABLE_MC_MCLKA) &&
+				    (tmp & R300_DISABLE_MC_MCLKB)) {
+					/* If both bits are set, then check the active channels */
+					tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+					if (rdev->mc.vram_width == 64) {
+						if (RREG32(RADEON_MEM_CNTL) &
+						    R300_MEM_USE_CD_CH_ONLY)
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKB;
+						else
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKA;
+					} else {
+						tmp &= ~(R300_DISABLE_MC_MCLKA |
+							 R300_DISABLE_MC_MCLKB);
+					}
+				}
+
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+			} else {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &= ~(R300_SCLK_FORCE_VAP);
+				tmp |= RADEON_SCLK_FORCE_CP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+				mdelay(15);
+
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+			}
+		} else {
+			tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+			tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+				 RADEON_DISP_DYN_STOP_LAT_MASK |
+				 RADEON_DYN_STOP_MODE_MASK);
+
+			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+			tmp |= RADEON_SCLK_DYN_START_CNTL;
+			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+			mdelay(15);
+
+			/* When DRI is enabled, setting DYN_STOP_LAT to zero
+			 * can cause some R200 chips to lock up randomly;
+			 * leave it as set by the BIOS.
+			 */
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			/*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+			tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+			/* RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+			if (((rdev->family == CHIP_RV250) &&
+			     ((RREG32(RADEON_CONFIG_CNTL) &
+			       RADEON_CFG_ATI_REV_ID_MASK) <
+			      RADEON_CFG_ATI_REV_A13))
+			    || ((rdev->family == CHIP_RV100)
+				&&
+				((RREG32(RADEON_CONFIG_CNTL) &
+				  RADEON_CFG_ATI_REV_ID_MASK) <=
+				 RADEON_CFG_ATI_REV_A13))) {
+				tmp |= RADEON_SCLK_FORCE_CP;
+				tmp |= RADEON_SCLK_FORCE_VIP;
+			}
+
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+				/* RV200::A11 A12 RV250::A11 A12 */
+				if (((rdev->family == CHIP_RV200) ||
+				     (rdev->family == CHIP_RV250)) &&
+				    ((RREG32(RADEON_CONFIG_CNTL) &
+				      RADEON_CFG_ATI_REV_ID_MASK) <
+				     RADEON_CFG_ATI_REV_A13)) {
+					tmp |= RADEON_SCLK_MORE_FORCEON;
+				}
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(15);
+			}
+
+			/* RV200::A11 A12, RV250::A11 A12 */
+			if (((rdev->family == CHIP_RV200) ||
+			     (rdev->family == CHIP_RV250)) &&
+			    ((RREG32(RADEON_CONFIG_CNTL) &
+			      RADEON_CFG_ATI_REV_ID_MASK) <
+			     RADEON_CFG_ATI_REV_A13)) {
+				tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+				tmp |= RADEON_TCL_BYPASS_DISABLE;
+				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+			}
+			mdelay(15);
+
+			/* enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+				RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				RADEON_PIXCLK_GV_ALWAYS_ONb |
+				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+				RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+			mdelay(15);
+		}
+	} else {
+		/* Turn everything OFF (ForceON to everything) */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+				RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+				| RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+				RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+				RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+				RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+				RADEON_SCLK_FORCE_RB);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if ((rdev->family == CHIP_RS400) ||
+			   (rdev->family == CHIP_RS480)) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else if (rdev->family >= CHIP_RV350) {
+			/* for RV350/M10, no delays are required. */
+			tmp = RREG32_PLL(R300_SCLK_CNTL2);
+			tmp |= (R300_SCLK_FORCE_TCL |
+				R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+			WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+			tmp |= (RADEON_FORCEON_MCLKA |
+				RADEON_FORCEON_MCLKB |
+				RADEON_FORCEON_YCLKA |
+				RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+			WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+			tmp |= RADEON_SCLK_FORCE_SE;
+
+			if (rdev->flags & RADEON_SINGLE_CRTC) {
+				tmp |= (RADEON_SCLK_FORCE_RB |
+					RADEON_SCLK_FORCE_TDM |
+					RADEON_SCLK_FORCE_TAM |
+					RADEON_SCLK_FORCE_PB |
+					RADEON_SCLK_FORCE_RE |
+					RADEON_SCLK_FORCE_VIP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_HDP);
+			} else if ((rdev->family == CHIP_R300) ||
+				   (rdev->family == CHIP_R350)) {
+				tmp |= (RADEON_SCLK_FORCE_HDP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_VIP);
+			}
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			mdelay(16);
+
+			if ((rdev->family == CHIP_R300) ||
+			    (rdev->family == CHIP_R350)) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp |= (R300_SCLK_FORCE_TCL |
+					R300_SCLK_FORCE_GA |
+					R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+				mdelay(16);
+			}
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp &= ~(RADEON_FORCEON_MCLKA |
+					 RADEON_FORCEON_YCLKA);
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+				mdelay(16);
+			}
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp |= RADEON_SCLK_MORE_FORCEON;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(16);
+			}
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(16);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+		}
+	}
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 0000000..60a61d3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,3625 @@
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#endif /* CONFIG_PPC_PMAC */
+
+/* from radeon_legacy_encoders.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+
+/* old legacy ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+	/* absolute offset tables */
+	COMBIOS_ASIC_INIT_1_TABLE,
+	COMBIOS_BIOS_SUPPORT_TABLE,
+	COMBIOS_DAC_PROGRAMMING_TABLE,
+	COMBIOS_MAX_COLOR_DEPTH_TABLE,
+	COMBIOS_CRTC_INFO_TABLE,
+	COMBIOS_PLL_INFO_TABLE,
+	COMBIOS_TV_INFO_TABLE,
+	COMBIOS_DFP_INFO_TABLE,
+	COMBIOS_HW_CONFIG_INFO_TABLE,
+	COMBIOS_MULTIMEDIA_INFO_TABLE,
+	COMBIOS_TV_STD_PATCH_TABLE,
+	COMBIOS_LCD_INFO_TABLE,
+	COMBIOS_MOBILE_INFO_TABLE,
+	COMBIOS_PLL_INIT_TABLE,
+	COMBIOS_MEM_CONFIG_TABLE,
+	COMBIOS_SAVE_MASK_TABLE,
+	COMBIOS_HARDCODED_EDID_TABLE,
+	COMBIOS_ASIC_INIT_2_TABLE,
+	COMBIOS_CONNECTOR_INFO_TABLE,
+	COMBIOS_DYN_CLK_1_TABLE,
+	COMBIOS_RESERVED_MEM_TABLE,
+	COMBIOS_EXT_TMDS_INFO_TABLE,
+	COMBIOS_MEM_CLK_INFO_TABLE,
+	COMBIOS_EXT_DAC_INFO_TABLE,
+	COMBIOS_MISC_INFO_TABLE,
+	COMBIOS_CRT_INFO_TABLE,
+	COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+	COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+	COMBIOS_FAN_SPEED_INFO_TABLE,
+	COMBIOS_OVERDRIVE_INFO_TABLE,
+	COMBIOS_OEM_INFO_TABLE,
+	COMBIOS_DYN_CLK_2_TABLE,
+	COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+	COMBIOS_I2C_INFO_TABLE,
+	/* relative offset tables */
+	COMBIOS_ASIC_INIT_3_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_4_TABLE,	/* offset from misc info */
+	COMBIOS_DETECTED_MEM_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_5_TABLE,	/* offset from misc info */
+	COMBIOS_RAM_RESET_TABLE,	/* offset from mem config */
+	COMBIOS_POWERPLAY_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_GPIO_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_LCD_DDC_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_ON_TABLE,	/* offset from tmds power */
+	COMBIOS_TMDS_POWER_OFF_TABLE,	/* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+	DDC_NONE_DETECTED,
+	DDC_MONID,
+	DDC_DVI,
+	DDC_VGA,
+	DDC_CRT2,
+	DDC_LCD,
+	DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+	CONNECTOR_NONE_LEGACY,
+	CONNECTOR_PROPRIETARY_LEGACY,
+	CONNECTOR_CRT_LEGACY,
+	CONNECTOR_DVI_I_LEGACY,
+	CONNECTOR_DVI_D_LEGACY,
+	CONNECTOR_CTV_LEGACY,
+	CONNECTOR_STV_LEGACY,
+	CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+static const int legacy_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+					 enum radeon_combios_table_offset table)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int rev, size;
+	uint16_t offset = 0, check_offset;
+
+	if (!rdev->bios)
+		return 0;
+
+	switch (table) {
+		/* absolute offset tables */
+	case COMBIOS_ASIC_INIT_1_TABLE:
+		check_offset = 0xc;
+		break;
+	case COMBIOS_BIOS_SUPPORT_TABLE:
+		check_offset = 0x14;
+		break;
+	case COMBIOS_DAC_PROGRAMMING_TABLE:
+		check_offset = 0x2a;
+		break;
+	case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+		check_offset = 0x2c;
+		break;
+	case COMBIOS_CRTC_INFO_TABLE:
+		check_offset = 0x2e;
+		break;
+	case COMBIOS_PLL_INFO_TABLE:
+		check_offset = 0x30;
+		break;
+	case COMBIOS_TV_INFO_TABLE:
+		check_offset = 0x32;
+		break;
+	case COMBIOS_DFP_INFO_TABLE:
+		check_offset = 0x34;
+		break;
+	case COMBIOS_HW_CONFIG_INFO_TABLE:
+		check_offset = 0x36;
+		break;
+	case COMBIOS_MULTIMEDIA_INFO_TABLE:
+		check_offset = 0x38;
+		break;
+	case COMBIOS_TV_STD_PATCH_TABLE:
+		check_offset = 0x3e;
+		break;
+	case COMBIOS_LCD_INFO_TABLE:
+		check_offset = 0x40;
+		break;
+	case COMBIOS_MOBILE_INFO_TABLE:
+		check_offset = 0x42;
+		break;
+	case COMBIOS_PLL_INIT_TABLE:
+		check_offset = 0x46;
+		break;
+	case COMBIOS_MEM_CONFIG_TABLE:
+		check_offset = 0x48;
+		break;
+	case COMBIOS_SAVE_MASK_TABLE:
+		check_offset = 0x4a;
+		break;
+	case COMBIOS_HARDCODED_EDID_TABLE:
+		check_offset = 0x4c;
+		break;
+	case COMBIOS_ASIC_INIT_2_TABLE:
+		check_offset = 0x4e;
+		break;
+	case COMBIOS_CONNECTOR_INFO_TABLE:
+		check_offset = 0x50;
+		break;
+	case COMBIOS_DYN_CLK_1_TABLE:
+		check_offset = 0x52;
+		break;
+	case COMBIOS_RESERVED_MEM_TABLE:
+		check_offset = 0x54;
+		break;
+	case COMBIOS_EXT_TMDS_INFO_TABLE:
+		check_offset = 0x58;
+		break;
+	case COMBIOS_MEM_CLK_INFO_TABLE:
+		check_offset = 0x5a;
+		break;
+	case COMBIOS_EXT_DAC_INFO_TABLE:
+		check_offset = 0x5c;
+		break;
+	case COMBIOS_MISC_INFO_TABLE:
+		check_offset = 0x5e;
+		break;
+	case COMBIOS_CRT_INFO_TABLE:
+		check_offset = 0x60;
+		break;
+	case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+		check_offset = 0x62;
+		break;
+	case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+		check_offset = 0x64;
+		break;
+	case COMBIOS_FAN_SPEED_INFO_TABLE:
+		check_offset = 0x66;
+		break;
+	case COMBIOS_OVERDRIVE_INFO_TABLE:
+		check_offset = 0x68;
+		break;
+	case COMBIOS_OEM_INFO_TABLE:
+		check_offset = 0x6a;
+		break;
+	case COMBIOS_DYN_CLK_2_TABLE:
+		check_offset = 0x6c;
+		break;
+	case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+		check_offset = 0x6e;
+		break;
+	case COMBIOS_I2C_INFO_TABLE:
+		check_offset = 0x70;
+		break;
+		/* relative offset tables */
+	case COMBIOS_ASIC_INIT_3_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x3);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_4_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x5);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_DETECTED_MEM_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x7);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_5_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev == 2) {
+				check_offset = RBIOS16(check_offset + 0x9);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_RAM_RESET_TABLE:	/* offset from mem config */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (check_offset) {
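+			/* the ram reset table follows a NUL-terminated
+			 * string plus two bytes in the mem config table
+			 */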
+			while (RBIOS8(check_offset++));
+			check_offset += 2;
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_POWERPLAY_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x11);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_GPIO_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x13);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_LCD_DDC_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x15);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x17);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_ON_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x2);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_OFF_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x4);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	default:
+		check_offset = 0;
+		break;
+	}
+
+	size = RBIOS8(rdev->bios_header_start + 0x6);
+	/* check absolute offset tables */
+	if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+		offset = RBIOS16(rdev->bios_header_start + check_offset);
+
+	return offset;
+}
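+
+/* For example, COMBIOS_PLL_INFO_TABLE has check_offset 0x30, so its table
+ * pointer is read from RBIOS16(bios_header_start + 0x30); the relative
+ * tables instead chain through their parent table's pointer as above.
+ */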
+
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+	int edid_info, size;
+	struct edid *edid;
+	unsigned char *raw;
+	edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+	if (!edid_info)
+		return false;
+
+	raw = rdev->bios + edid_info;
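+	/* byte 0x7e of the base EDID block is the extension count; the base
+	 * block plus each extension is EDID_LENGTH (128) bytes
+	 */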
+	size = EDID_LENGTH * (raw[0x7e] + 1);
+	edid = kmalloc(size, GFP_KERNEL);
+	if (edid == NULL)
+		return false;
+
+	memcpy((unsigned char *)edid, raw, size);
+
+	if (!drm_edid_is_valid(edid)) {
+		kfree(edid);
+		return false;
+	}
+
+	rdev->mode_info.bios_hardcoded_edid = edid;
+	rdev->mode_info.bios_hardcoded_edid_size = size;
+	return true;
+}
+
+/* this is used for atom LCDs as well */
+struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+	struct edid *edid;
+
+	if (rdev->mode_info.bios_hardcoded_edid) {
+		edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+		if (edid) {
+			memcpy((unsigned char *)edid,
+			       (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+			       rdev->mode_info.bios_hardcoded_edid_size);
+			return edid;
+		}
+	}
+	return NULL;
+}
+
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+						       enum radeon_combios_ddc ddc,
+						       u32 clk_mask,
+						       u32 data_mask)
+{
+	struct radeon_i2c_bus_rec i2c;
+	int ddc_line = 0;
+
+	/* ddc id            = mask reg
+	 * DDC_NONE_DETECTED = none
+	 * DDC_DVI           = RADEON_GPIO_DVI_DDC
+	 * DDC_VGA           = RADEON_GPIO_VGA_DDC
+	 * DDC_LCD           = RADEON_GPIOPAD_MASK
+	 * DDC_GPIO          = RADEON_MDGPIO_MASK
+	 * r1xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_CRT2_DDC
+	 * r200
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * r300/r350
+	 * DDC_MONID         = RADEON_GPIO_DVI_DDC
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * rv2xx/rv3xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 * rs3xx/rs4xx
+	 * DDC_MONID         = RADEON_GPIOPAD_MASK
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 */
+	switch (ddc) {
+	case DDC_NONE_DETECTED:
+	default:
+		ddc_line = 0;
+		break;
+	case DDC_DVI:
+		ddc_line = RADEON_GPIO_DVI_DDC;
+		break;
+	case DDC_VGA:
+		ddc_line = RADEON_GPIO_VGA_DDC;
+		break;
+	case DDC_LCD:
+		ddc_line = RADEON_GPIOPAD_MASK;
+		break;
+	case DDC_GPIO:
+		ddc_line = RADEON_MDGPIO_MASK;
+		break;
+	case DDC_MONID:
+		if (rdev->family == CHIP_RS300 ||
+		    rdev->family == CHIP_RS400 ||
+		    rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIOPAD_MASK;
+		else if (rdev->family == CHIP_R300 ||
+			 rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else
+			ddc_line = RADEON_GPIO_MONID;
+		break;
+	case DDC_CRT2:
+		if (rdev->family == CHIP_R200 ||
+		    rdev->family == CHIP_R300 ||
+		    rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else if (rdev->family == CHIP_RS300 ||
+			   rdev->family == CHIP_RS400 ||
+			   rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIO_MONID;
+		else if (rdev->family >= CHIP_RV350) {
+			ddc_line = RADEON_GPIO_MONID;
+			ddc = DDC_MONID;
+		} else
+			ddc_line = RADEON_GPIO_CRT2_DDC;
+		break;
+	}
+
+	if (ddc_line == RADEON_GPIOPAD_MASK) {
+		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+		i2c.a_clk_reg = RADEON_GPIOPAD_A;
+		i2c.a_data_reg = RADEON_GPIOPAD_A;
+		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+		i2c.en_data_reg = RADEON_GPIOPAD_EN;
+		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+		i2c.y_data_reg = RADEON_GPIOPAD_Y;
+	} else if (ddc_line == RADEON_MDGPIO_MASK) {
+		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+		i2c.a_clk_reg = RADEON_MDGPIO_A;
+		i2c.a_data_reg = RADEON_MDGPIO_A;
+		i2c.en_clk_reg = RADEON_MDGPIO_EN;
+		i2c.en_data_reg = RADEON_MDGPIO_EN;
+		i2c.y_clk_reg = RADEON_MDGPIO_Y;
+		i2c.y_data_reg = RADEON_MDGPIO_Y;
+	} else {
+		i2c.mask_clk_reg = ddc_line;
+		i2c.mask_data_reg = ddc_line;
+		i2c.a_clk_reg = ddc_line;
+		i2c.a_data_reg = ddc_line;
+		i2c.en_clk_reg = ddc_line;
+		i2c.en_data_reg = ddc_line;
+		i2c.y_clk_reg = ddc_line;
+		i2c.y_data_reg = ddc_line;
+	}
+
+	if (clk_mask && data_mask) {
+		/* system specific masks */
+		i2c.mask_clk_mask = clk_mask;
+		i2c.mask_data_mask = data_mask;
+		i2c.a_clk_mask = clk_mask;
+		i2c.a_data_mask = data_mask;
+		i2c.en_clk_mask = clk_mask;
+		i2c.en_data_mask = data_mask;
+		i2c.y_clk_mask = clk_mask;
+		i2c.y_data_mask = data_mask;
+	} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+		   (ddc_line == RADEON_MDGPIO_MASK)) {
+		/* default gpiopad masks */
+		i2c.mask_clk_mask = (0x20 << 8);
+		i2c.mask_data_mask = 0x80;
+		i2c.a_clk_mask = (0x20 << 8);
+		i2c.a_data_mask = 0x80;
+		i2c.en_clk_mask = (0x20 << 8);
+		i2c.en_data_mask = 0x80;
+		i2c.y_clk_mask = (0x20 << 8);
+		i2c.y_data_mask = 0x80;
+	} else {
+		/* default masks for ddc pads */
+		i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+		i2c.mask_data_mask = RADEON_GPIO_MASK_0;
+		i2c.a_clk_mask = RADEON_GPIO_A_1;
+		i2c.a_data_mask = RADEON_GPIO_A_0;
+		i2c.en_clk_mask = RADEON_GPIO_EN_1;
+		i2c.en_data_mask = RADEON_GPIO_EN_0;
+		i2c.y_clk_mask = RADEON_GPIO_Y_1;
+		i2c.y_data_mask = RADEON_GPIO_Y_0;
+	}
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_RS300:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R200:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_MONID:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV250:
+	case CHIP_RV280:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_CRT2_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		case RADEON_GPIO_MONID:
+			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+			 * reliably on some pre-r4xx hardware; not sure why.
+			 */
+			i2c.hw_capable = false;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	default:
+		i2c.hw_capable = false;
+		break;
+	}
+	i2c.mm_i2c = false;
+
+	i2c.i2c_id = ddc;
+	i2c.hpd = RADEON_HPD_NONE;
+
+	if (ddc_line)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+	u16 offset;
+	u8 id, blocks, clk, data;
+	int i;
+
+	i2c.valid = false;
+
+	offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+	if (offset) {
+		blocks = RBIOS8(offset + 2);
+		for (i = 0; i < blocks; i++) {
+			id = RBIOS8(offset + 3 + (i * 5) + 0);
+			if (id == 136) {
+				clk = RBIOS8(offset + 3 + (i * 5) + 3);
+				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
+				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+							    (1 << clk), (1 << data));
+				break;
+			}
+		}
+	}
+	return i2c;
+}
+
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+
+	/* actual hw pads
+	 * r1xx/rs2xx/rs3xx
+	 * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
+	 * r200
+	 * 0x60, 0x64, 0x68, mm
+	 * r300/r350
+	 * 0x60, 0x64, mm
+	 * rv2xx/rv3xx/rs4xx
+	 * 0x60, 0x64, 0x68, gpiopads, mm
+	 */
+
+	/* 0x60 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+	rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+	/* 0x64 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+	rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+	/* mm i2c */
+	i2c.valid = true;
+	i2c.hw_capable = true;
+	i2c.mm_i2c = true;
+	i2c.i2c_id = 0xa0;
+	rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350) {
+		/* only 2 sw i2c pads */
+	} else if (rdev->family == CHIP_RS300 ||
+		   rdev->family == CHIP_RS400 ||
+		   rdev->family == CHIP_RS480) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+		/* gpiopad */
+		i2c = radeon_combios_get_i2c_info_from_table(rdev);
+		if (i2c.valid)
+			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family >= CHIP_R300)) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+	} else {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+		/* 0x6c */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+	}
+}
+
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t pll_info;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int8_t rev;
+	uint16_t sclk, mclk;
+
+	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+	if (pll_info) {
+		rev = RBIOS8(pll_info);
+
+		/* pixel clocks */
+		p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+		p1pll->reference_div = RBIOS16(pll_info + 0x10);
+		p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+		p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+		p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+		p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+
+		if (rev > 9) {
+			p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+			p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+		} else {
+			p1pll->pll_in_min = 40;
+			p1pll->pll_in_max = 500;
+		}
+		*p2pll = *p1pll;
+
+		/* system clock */
+		spll->reference_freq = RBIOS16(pll_info + 0x1a);
+		spll->reference_div = RBIOS16(pll_info + 0x1c);
+		spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+		spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+		if (rev > 10) {
+			spll->pll_in_min = RBIOS32(pll_info + 0x48);
+			spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+		} else {
+			/* ??? */
+			spll->pll_in_min = 40;
+			spll->pll_in_max = 500;
+		}
+
+		/* memory clock */
+		mpll->reference_freq = RBIOS16(pll_info + 0x26);
+		mpll->reference_div = RBIOS16(pll_info + 0x28);
+		mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+		mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+		if (rev > 10) {
+			mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+			mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+		} else {
+			/* ??? */
+			mpll->pll_in_min = 40;
+			mpll->pll_in_max = 500;
+		}
+
+		/* default sclk/mclk */
+		sclk = RBIOS16(pll_info + 0xa);
+		mclk = RBIOS16(pll_info + 0x8);
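+		/* clocks are in 10 kHz units; fall back to 200 MHz */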
+		if (sclk == 0)
+			sclk = 200 * 100;
+		if (mclk == 0)
+			mclk = 200 * 100;
+
+		rdev->clock.default_sclk = sclk;
+		rdev->clock.default_mclk = mclk;
+
+		if (RBIOS32(pll_info + 0x16))
+			rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+		else
+			rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
+
+		return true;
+	}
+	return false;
+}
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 igp_info;
+
+	/* sideport is AMD only; RS400 is the Intel-based variant */
+	if (rdev->family == CHIP_RS400)
+		return false;
+
+	igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+	if (igp_info) {
+		if (RBIOS16(igp_info + 0x4))
+			return true;
+	}
+	return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+	0x00000808,		/* r100  */
+	0x00000808,		/* rv100 */
+	0x00000808,		/* rs100 */
+	0x00000808,		/* rv200 */
+	0x00000808,		/* rs200 */
+	0x00000808,		/* r200  */
+	0x00000808,		/* rv250 */
+	0x00000000,		/* rs300 */
+	0x00000808,		/* rv280 */
+	0x00000808,		/* r300  */
+	0x00000808,		/* r350  */
+	0x00000808,		/* rv350 */
+	0x00000808,		/* rv380 */
+	0x00000808,		/* r420  */
+	0x00000808,		/* r423  */
+	0x00000808,		/* rv410 */
+	0x00000000,		/* rs400 */
+	0x00000000,		/* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+							  struct radeon_encoder_primary_dac *p_dac)
+{
+	p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+	return;
+}
+
+struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+	int found = 0;
+
+	p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+			GFP_KERNEL);
+
+	if (!p_dac)
+		return NULL;
+
+	/* check CRT table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+	if (dac_info) {
+		rev = RBIOS8(dac_info) & 0x3;
+		if (rev < 2) {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
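+			/* bandgap in bits 11:8, dac adjust in bits 3:0 */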
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		} else {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = RBIOS8(dac_info + 0x3) & 0xf;
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		}
+		/* if either value is zero, fall back to the table defaults */
+		if ((dac == 0) || (bg == 0))
+			found = 0;
+		else
+			found = 1;
+	}
+
+	/* quirks */
+	/* Radeon 7000 (RV100) */
+	if (((dev->pdev->device == 0x5159) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7c28)) ||
+	/* Radeon 9100 (R200) */
+	   ((dev->pdev->device == 0x514D) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7149))) {
+		/* vbios value is bad, use the default */
+		found = 0;
+	}
+
+	if (!found) /* fallback to defaults */
+		radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+	return p_dac;
+}
+
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	uint16_t tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (tv_info) {
+		if (RBIOS8(tv_info + 6) == 'T') {
+			switch (RBIOS8(tv_info + 7) & 0xf) {
+			case 1:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+				break;
+			case 2:
+				tv_std = TV_STD_PAL;
+				DRM_DEBUG_KMS("Default TV standard: PAL\n");
+				break;
+			case 3:
+				tv_std = TV_STD_PAL_M;
+				DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+				break;
+			case 4:
+				tv_std = TV_STD_PAL_60;
+				DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+				break;
+			case 5:
+				tv_std = TV_STD_NTSC_J;
+				DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+				break;
+			case 6:
+				tv_std = TV_STD_SCART_PAL;
+				DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
+				break;
+			default:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+				break;
+			}
+
+			switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+			case 0:
+				DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
+				break;
+			case 1:
+				DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
+				break;
+			case 2:
+				DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
+				break;
+			case 3:
+				DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return tv_std;
+}
+
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+	0x00000000,		/* r100  */
+	0x00280000,		/* rv100 */
+	0x00000000,		/* rs100 */
+	0x00880000,		/* rv200 */
+	0x00000000,		/* rs200 */
+	0x00000000,		/* r200  */
+	0x00770000,		/* rv250 */
+	0x00290000,		/* rs300 */
+	0x00560000,		/* rv280 */
+	0x00780000,		/* r300  */
+	0x00770000,		/* r350  */
+	0x00780000,		/* rv350 */
+	0x00780000,		/* rv380 */
+	0x01080000,		/* r420  */
+	0x01080000,		/* r423  */
+	0x01080000,		/* rv410 */
+	0x00780000,		/* rs400 */
+	0x00780000,		/* rs480 */
+};
+
+static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
+						     struct radeon_encoder_tv_dac *tv_dac)
+{
+	tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+	if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+		tv_dac->ps2_tvdac_adj = 0x00880000;
+	tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+	tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+}
+
+struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+	int found = 0;
+
+	tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+	if (!tv_dac)
+		return NULL;
+
+	/* first check TV table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (dac_info) {
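+		/* rev > 4 stores separate bg/dac adjust bytes for PS2, PAL
+		 * and NTSC at +0xc..+0x11; rev 2..4 packs each pair into a
+		 * single byte at +0xc..+0xe.
+		 */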
+		rev = RBIOS8(dac_info + 0x3);
+		if (rev > 4) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = RBIOS8(dac_info + 0xd) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = RBIOS8(dac_info + 0xf) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0x10) & 0xf;
+			dac = RBIOS8(dac_info + 0x11) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* if the values are all zeros, use the table */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		} else if (rev > 1) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xd) & 0xf;
+			dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* if the values are all zeros, use the table */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		}
+		tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+	}
+	if (!found) {
+		/* then check CRT table */
+		dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+		if (dac_info) {
+			rev = RBIOS8(dac_info) & 0x3;
+			if (rev < 2) {
+				bg = RBIOS8(dac_info + 0x3) & 0xf;
+				dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* if the values are all zeros, use the table */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			} else {
+				bg = RBIOS8(dac_info + 0x4) & 0xf;
+				dac = RBIOS8(dac_info + 0x5) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* if the values are all zeros, use the table */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			}
+		} else {
+			DRM_INFO("No TV DAC info found in BIOS\n");
+		}
+	}
+
+	if (!found) /* fallback to defaults */
+		radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
+
+	return tv_dac;
+}
+
+static struct radeon_encoder_lvds *
+radeon_legacy_get_lvds_info_from_regs(struct radeon_device *rdev)
+{
+	struct radeon_encoder_lvds *lvds = NULL;
+	uint32_t fp_vert_stretch, fp_horz_stretch;
+	uint32_t ppll_div_sel, ppll_val;
+	uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+
+	lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+	if (!lvds)
+		return NULL;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+	/* These should be fail-safe defaults, fingers crossed */
+	lvds->panel_pwr_delay = 200;
+	lvds->panel_vcc_delay = 2000;
+
+	lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
+	lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
+
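+	/* Derive the native panel size from the FP stretch registers when
+	 * stretching is enabled, else from the active CRTC timing; the
+	 * horizontal fields are stored in units of 8 pixels.
+	 */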
+	if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+		lvds->native_mode.vdisplay =
+		    ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+		     RADEON_VERT_PANEL_SHIFT) + 1;
+	else
+		lvds->native_mode.vdisplay =
+		    (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+	if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+		lvds->native_mode.hdisplay =
+		    (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+		      RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+	else
+		lvds->native_mode.hdisplay =
+		    ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+	if ((lvds->native_mode.hdisplay < 640) ||
+	    (lvds->native_mode.vdisplay < 480)) {
+		lvds->native_mode.hdisplay = 640;
+		lvds->native_mode.vdisplay = 480;
+	}
+
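+	/* A PPLL divider value of 0x1bb is treated as the unprogrammed
+	 * power-on default, in which case the register contents cannot be
+	 * trusted for the panel dividers.
+	 */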
+	ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+	ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
+	if ((ppll_val & 0x000707ff) == 0x1bb)
+		lvds->use_bios_dividers = false;
+	else {
+		lvds->panel_ref_divider =
+		    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+		lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+		lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+	}
+	lvds->panel_vcc_delay = 200;
+
+	DRM_INFO("Panel info derived from registers\n");
+	DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+		 lvds->native_mode.vdisplay);
+
+	return lvds;
+}
+
+struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t lcd_info;
+	uint32_t panel_setup;
+	char stmp[30];
+	int tmp, i;
+	struct radeon_encoder_lvds *lvds = NULL;
+
+	lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+	if (lcd_info) {
+		lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+		if (!lvds)
+			return NULL;
+
+		for (i = 0; i < 24; i++)
+			stmp[i] = RBIOS8(lcd_info + i + 1);
+		stmp[24] = 0;
+
+		DRM_INFO("Panel ID String: %s\n", stmp);
+
+		lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+		lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
+
+		DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+			 lvds->native_mode.vdisplay);
+
+		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+		lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
+
+		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+		lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+		lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+		lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+		lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+
+		panel_setup = RBIOS32(lcd_info + 0x39);
+		lvds->lvds_gen_cntl = 0xff00;
+		if (panel_setup & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+		if ((panel_setup >> 4) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+		switch ((panel_setup >> 8) & 0x7) {
+		case 0:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+			break;
+		case 1:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+			break;
+		case 2:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+			break;
+		default:
+			break;
+		}
+
+		if ((panel_setup >> 16) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+		if ((panel_setup >> 17) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+		if ((panel_setup >> 18) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+		if ((panel_setup >> 23) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+		lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
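+		/* The LCD table ends with up to 32 zero-terminated pointers
+		 * to detailed timing blocks; use the one whose resolution
+		 * matches the native mode.
+		 */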
+		for (i = 0; i < 32; i++) {
+			tmp = RBIOS16(lcd_info + 64 + i * 2);
+			if (tmp == 0)
+				break;
+
+			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
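+				/* Guard against a sync start beyond the
+				 * visible area; fall back to 72 pixels.
+				 */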
+				if (hss > lvds->native_mode.hdisplay)
+					hss = (10 - 1) * 8;
+
+				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+					hss;
+				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+					(RBIOS8(tmp + 23) * 8);
+
+				lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+					(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+					((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+					((RBIOS16(tmp + 28) & 0xf800) >> 11);
+
+				lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
+				lvds->native_mode.flags = 0;
+				/* set crtc values */
+				drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+			}
+		}
+	} else {
+		DRM_INFO("No panel info found in BIOS\n");
+		lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
+	}
+
+	if (lvds)
+		encoder->native_mode = lvds->native_mode;
+	return lvds;
+}
+
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R100  */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV100 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS100 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV200 */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RS200 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R200  */
+	{{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}},	/* CHIP_RV250 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS300 */
+	{{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}},	/* CHIP_RV280 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R300  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R350  */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV350 */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV380 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS400 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS480 */
+};
+
+bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+					    struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		tmds->tmds_pll[i].value =
+			default_tmds_pll[rdev->family][i].value;
+		tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+					      struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t tmds_info;
+	int i, n;
+	uint8_t ver;
+
+	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+	if (tmds_info) {
+		ver = RBIOS8(tmds_info);
+		DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
+		if (ver == 3) {
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + i * 10 + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + i * 10 + 0x10);
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		} else if (ver == 4) {
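+			/* rev 4 uses a 10-byte first record followed by
+			 * 6-byte records, hence the running stride.
+			 */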
+			int stride = 0;
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + stride + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + stride + 0x10);
+				if (i == 0)
+					stride += 10;
+				else
+					stride += 6;
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		}
+	} else {
+		DRM_INFO("No TMDS info found in BIOS\n");
+		return false;
+	}
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* default for macs */
+	i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+	tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+
+	/* XXX some macs have duallink chips */
+	switch (rdev->mode_info.connector_table) {
+	case CT_POWERBOOK_EXTERNAL:
+	case CT_MINI_EXTERNAL:
+	default:
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+		break;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+						  struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t offset;
+	uint8_t ver;
+	enum radeon_combios_ddc gpio;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	tmds->i2c_bus = NULL;
+	if (rdev->flags & RADEON_IS_IGP) {
+		i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
+			tmds->slave_addr = RBIOS8(offset + 4 + 2);
+			tmds->slave_addr >>= 1; /* 7 bit addressing */
+			gpio = RBIOS8(offset + 4 + 3);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		}
+	}
+
+	if (!tmds->i2c_bus) {
+		DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+		return false;
+	}
+
+	return true;
+}
+
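+/*
+ * Build encoders and connectors from a hardcoded per-platform table, either
+ * set via the connector_table module parameter or guessed from the OF
+ * machine name on PowerMacs.
+ */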
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
+	rdev->mode_info.connector_table = radeon_connector_table;
+	if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+		if (of_machine_is_compatible("PowerBook3,3")) {
+			/* powerbook with VGA */
+			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
+			/* powerbook with internal tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
+			/* powerbook with external single link tmds (sil164) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
+			/* powerbook with external dual or single link tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
+			/* PowerBook6,2 ? */
+			/* powerbook with external dual link tmds (sil1178?) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
+			/* ibook */
+			rdev->mode_info.connector_table = CT_IBOOK;
+		} else if (of_machine_is_compatible("PowerMac3,5")) {
+			/* PowerMac G4 Silver radeon 7500 */
+			rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
+			/* emac */
+			rdev->mode_info.connector_table = CT_EMAC;
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
+			/* mini with internal tmds */
+			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
+			/* mini with external tmds */
+			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
+			/* PowerMac8,1 ? */
+			/* imac g5 isight */
+			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+		} else if ((rdev->pdev->device == 0x4a48) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4a48)) {
+			/* Mac X800 */
+			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((of_machine_is_compatible("PowerMac7,2") ||
+			    of_machine_is_compatible("PowerMac7,3")) &&
+			   (rdev->pdev->device == 0x4150) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4150)) {
+			/* Mac G5 tower 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
+		} else if ((rdev->pdev->device == 0x4c66) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4c66)) {
+			/* SAM440ep RV250 embedded board */
+			rdev->mode_info.connector_table = CT_SAM440EP;
+		} else
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+		if (ASIC_IS_RN50(rdev))
+			rdev->mode_info.connector_table = CT_RN50_POWER;
+		else
+#endif
+			rdev->mode_info.connector_table = CT_GENERIC;
+	}
+
+	switch (rdev->mode_info.connector_table) {
+	case CT_GENERIC:
+		DRM_INFO("Connector Table: %d (generic)\n",
+			 rdev->mode_info.connector_table);
+		/* these are the most common settings */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* LVDS */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else {
+			/* DVI-I - tv dac, int tmds */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT2_SUPPORT,
+									2),
+						  ATOM_DEVICE_CRT2_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_DFP1_SUPPORT |
+						    ATOM_DEVICE_CRT2_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		}
+
+		if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+			/* TV - tv dac */
+			ddc_i2c.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_TV1_SUPPORT,
+									2),
+						  ATOM_DEVICE_TV1_SUPPORT);
+			radeon_add_legacy_connector(dev, 2,
+						    ATOM_DEVICE_TV1_SUPPORT,
+						    DRM_MODE_CONNECTOR_SVIDEO,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SVIDEO,
+						    &hpd);
+		}
+		break;
+	case CT_IBOOK:
+		DRM_INFO("Connector Table: %d (ibook)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - TV DAC */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_EXTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		/* XXX some are SL */
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_INTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_VGA:
+		DRM_INFO("Connector Table: %d (powerbook vga)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_EXTERNAL:
+		DRM_INFO("Connector Table: %d (mini external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		/* XXX are any DL? */
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_INTERNAL:
+		DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_IMAC_G5_ISIGHT:
+		DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-D - int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_EMAC:
+		DRM_INFO("Connector Table: %d (emac)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_RN50_POWER:
+		DRM_INFO("Connector Table: %d (rn50-power)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		break;
+	case CT_MAC_X800:
+		DRM_INFO("Connector Table: %d (mac x800)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_SAM440EP:
+		DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - secondary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2,
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MAC_G4_SILVER:
+		DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	default:
+		DRM_INFO("Connector table: %d (invalid)\n",
+			 rdev->mode_info.connector_table);
+		return false;
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+				       int bios_index,
+				       enum radeon_combios_connector *legacy_connector,
+				       struct radeon_i2c_bus_rec *ddc_i2c,
+				       struct radeon_hpd *hpd)
+{
+	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs, one
+	 * with VGA DDC and one with CRT2 DDC; kill the CRT2 DDC one.
+	 */
+	if (dev->pdev->device == 0x515e &&
+	    dev->pdev->subsystem_vendor == 0x1014) {
+		if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+		    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+			return false;
+	}
+
+	/* X300 card with extra non-existent DVI port */
+	if (dev->pdev->device == 0x5B60 &&
+	    dev->pdev->subsystem_vendor == 0x17af &&
+	    dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+		if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+			return false;
+	}
+
+	return true;
+}
+
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+	/* Acer 5102 has non-existent TV port */
+	if (dev->pdev->device == 0x5975 &&
+	    dev->pdev->subsystem_vendor == 0x1025 &&
+	    dev->pdev->subsystem_device == 0x009f)
+		return false;
+
+	/* HP dc5750 has non-existent TV port */
+	if (dev->pdev->device == 0x5974 &&
+	    dev->pdev->subsystem_vendor == 0x103c &&
+	    dev->pdev->subsystem_device == 0x280a)
+		return false;
+
+	/* MSI S270 has non-existent TV port */
+	if (dev->pdev->device == 0x5955 &&
+	    dev->pdev->subsystem_vendor == 0x1462 &&
+	    dev->pdev->subsystem_device == 0x0131)
+		return false;
+
+	return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ext_tmds_info;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (is_dvi_d)
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+		else
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+	}
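+	/* External TMDS table rev >= 3 always implies dual link; older
+	 * revisions flag dual link in bit 0 of the byte at +4+5.
+	 */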
+	ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+	if (ext_tmds_info) {
+		uint8_t rev = RBIOS8(ext_tmds_info);
+		uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
+		if (rev >= 3) {
+			if (is_dvi_d)
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			else
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+		} else {
+			if (flags & 1) {
+				if (is_dvi_d)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+			}
+		}
+	}
+	if (is_dvi_d)
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+	else
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
+
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t conn_info, entry, devices;
+	uint16_t tmp, connector_object_id;
+	enum radeon_combios_ddc ddc_type;
+	enum radeon_combios_connector connector;
+	int i = 0;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
+	conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+	if (conn_info) {
+		for (i = 0; i < 4; i++) {
+			entry = conn_info + 2 + i * 2;
+
+			if (!RBIOS16(entry))
+				break;
+
+			tmp = RBIOS16(entry);
+
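+			/* Connector entry layout: bits 15:12 connector type,
+			 * bits 11:8 DDC type, bit 4 selects DFP2/TMDS2 and
+			 * bit 0 routes CRT output to the TV DAC.
+			 */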
+			connector = (tmp >> 12) & 0xf;
+
+			ddc_type = (tmp >> 8) & 0xf;
+			if (ddc_type == 5)
+				ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+			else
+				ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+			case CONNECTOR_DVI_I_LEGACY:
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					hpd.hpd = RADEON_HPD_2;
+				else
+					hpd.hpd = RADEON_HPD_1;
+				break;
+			default:
+				hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+
+			if (!radeon_apply_legacy_quirks(dev, i, &connector,
+							&ddc_i2c, &hpd))
+				continue;
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+				else
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+							    &hpd);
+				break;
+			case CONNECTOR_CRT_LEGACY:
+				if (tmp & 0x1) {
+					devices = ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices = ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_I_LEGACY:
+				devices = 0;
+				if (tmp & 0x1) {
+					devices |= ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices |= ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				/* RV100 board with external TMDS bit mis-set.
+				 * Actually uses internal TMDS, clear the bit.
+				 */
+				if (dev->pdev->device == 0x5159 &&
+				    dev->pdev->subsystem_vendor == 0x1014 &&
+				    dev->pdev->subsystem_device == 0x029A) {
+					tmp &= ~(1 << 4);
+				}
+				if ((tmp >> 4) & 0x1) {
+					devices |= ATOM_DEVICE_DFP2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP2_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP2_SUPPORT);
+					connector_object_id = combios_check_dl_dvi(dev, 0);
+				} else {
+					devices |= ATOM_DEVICE_DFP1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP1_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP1_SUPPORT);
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1) {
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+					connector_object_id = combios_check_dl_dvi(dev, 1);
+				} else {
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_CTV_LEGACY:
+			case CONNECTOR_STV_LEGACY:
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev,
+							   ATOM_DEVICE_TV1_SUPPORT,
+							   2),
+							  ATOM_DEVICE_TV1_SUPPORT);
+				radeon_add_legacy_connector(dev, i,
+							    ATOM_DEVICE_TV1_SUPPORT,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SVIDEO,
+							    &hpd);
+				break;
+			default:
+				DRM_ERROR("Unknown connector type: %d\n",
+					  connector);
+				continue;
+			}
+
+		}
+	} else {
+		uint16_t tmds_info =
+		    combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+		if (tmds_info) {
+			DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_connector(dev,
+						    0,
+						    ATOM_DEVICE_CRT1_SUPPORT |
+						    ATOM_DEVICE_DFP1_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+		} else {
+			uint16_t crt_info =
+				combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+			DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
+			if (crt_info) {
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum(dev,
+										ATOM_DEVICE_CRT1_SUPPORT,
+										1),
+							  ATOM_DEVICE_CRT1_SUPPORT);
+				ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+				hpd.hpd = RADEON_HPD_NONE;
+				radeon_add_legacy_connector(dev,
+							    0,
+							    ATOM_DEVICE_CRT1_SUPPORT,
+							    DRM_MODE_CONNECTOR_VGA,
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+			} else {
+				DRM_DEBUG_KMS("No connector info found\n");
+				return false;
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+		uint16_t lcd_info =
+		    combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+		if (lcd_info) {
+			uint16_t lcd_ddc_info =
+			    combios_get_table_offset(dev,
+						     COMBIOS_LCD_DDC_INFO_TABLE);
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+
+			if (lcd_ddc_info) {
+				ddc_type = RBIOS8(lcd_ddc_info + 2);
+				switch (ddc_type) {
+				case DDC_LCD:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_LCD,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				case DDC_GPIO:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_GPIO,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				default:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+					break;
+				}
+				DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
+			} else
+				ddc_i2c.valid = false;
+
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_connector(dev,
+						    5,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+		}
+	}
+
+	/* check TV table */
+	if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+		uint32_t tv_info =
+		    combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+		if (tv_info) {
+			if (RBIOS8(tv_info + 6) == 'T') {
+				if (radeon_apply_legacy_tv_quirks(dev)) {
+					hpd.hpd = RADEON_HPD_NONE;
+					ddc_i2c.valid = false;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_TV1_SUPPORT,
+								   2),
+								  ATOM_DEVICE_TV1_SUPPORT);
+					radeon_add_legacy_connector(dev, 6,
+								    ATOM_DEVICE_TV1_SUPPORT,
+								    DRM_MODE_CONNECTOR_SVIDEO,
+								    &ddc_i2c,
+								    CONNECTOR_OBJECT_ID_SVIDEO,
+								    &hpd);
+				}
+			}
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+};
+
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 offset, misc, misc2 = 0;
+	u8 rev, blocks, tmp;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	rdev->pm.default_power_state_index = -1;
+
+	/* allocate 2 power states */
+	rdev->pm.power_state = kcalloc(2, sizeof(struct radeon_power_state),
+				       GFP_KERNEL);
+	if (rdev->pm.power_state) {
+		/* allocate 1 clock mode per state */
+		rdev->pm.power_state[0].clock_info =
+			kcalloc(1, sizeof(struct radeon_pm_clock_info),
+				GFP_KERNEL);
+		rdev->pm.power_state[1].clock_info =
+			kcalloc(1, sizeof(struct radeon_pm_clock_info),
+				GFP_KERNEL);
+		if (!rdev->pm.power_state[0].clock_info ||
+		    !rdev->pm.power_state[1].clock_info)
+			goto pm_failed;
+	} else
+		goto pm_failed;
+
+	/* check for a thermal chip */
+	offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+	if (offset) {
+		u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+
+		rev = RBIOS8(offset);
+
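+		/* The field offsets shift by one byte from rev 0 to rev 1+;
+		 * rev 2 adds explicit i2c clock/data GPIO bit numbers.
+		 */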
+		if (rev == 0) {
+			thermal_controller = RBIOS8(offset + 3);
+			gpio = RBIOS8(offset + 4) & 0x3f;
+			i2c_addr = RBIOS8(offset + 5);
+		} else if (rev == 1) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+		} else if (rev == 2) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+			clk_bit = RBIOS8(offset + 0xa);
+			data_bit = RBIOS8(offset + 0xb);
+		}
+		if ((thermal_controller > 0) && (thermal_controller < 3)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+				 thermal_controller_names[thermal_controller],
+				 i2c_addr >> 1);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else if (gpio == DDC_GPIO)
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
+			else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = thermal_controller_names[thermal_controller];
+				info.addr = i2c_addr >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		}
+	} else {
+		/* boards with a thermal chip, but no overdrive table */
+
+		/* Asus 9600xt has an f75375 on the monid bus */
+		if ((dev->pdev->device == 0x4152) &&
+		    (dev->pdev->subsystem_vendor == 0x1043) &&
+		    (dev->pdev->subsystem_device == 0xc002)) {
+			i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = "f75375";
+				info.addr = 0x28;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+					 name, info.addr);
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY) {
+		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			blocks = RBIOS8(offset + 0x2);
+			/* power mode 0 tends to be the only valid one */
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+			rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				goto default_mode;
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_BATTERY;
+			misc = RBIOS16(offset + 0x5 + 0x0);
+			if (rev > 4)
+				misc2 = RBIOS16(offset + 0x5 + 0xe);
+			rdev->pm.power_state[state_index].misc = misc;
+			rdev->pm.power_state[state_index].misc2 = misc2;
+			if (misc & 0x4) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+				if (misc & 0x8)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+				if (rev < 6) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+						RBIOS16(offset + 0x5 + 0xb) * 4;
+					tmp = RBIOS8(offset + 0x5 + 0xd);
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+				} else {
+					u8 entries = RBIOS8(offset + 0x5 + 0xb);
+					u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+					if (entries && voltage_table_offset) {
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+							RBIOS16(voltage_table_offset) * 4;
+						tmp = RBIOS8(voltage_table_offset + 0x2);
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+					} else
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+				}
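+				/* misc2 bits 8-10 select the regulator
+				 * switching delay in steps of 33 (usec,
+				 * presumably); encodings above 4 fall back
+				 * to no delay.
+				 */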
+				switch ((misc2 & 0x700) >> 8) {
+				case 0:
+				default:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+					break;
+				case 1:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+					break;
+				case 2:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+					break;
+				case 3:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+					break;
+				case 4:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+					break;
+				}
+			} else
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+			if (rev > 6)
+				rdev->pm.power_state[state_index].pcie_lanes =
+					RBIOS8(offset + 0x5 + 0x10);
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			state_index++;
+		} else {
+			/* XXX figure out some good default low power mode for mobility cards w/out power tables */
+		}
+	} else {
+		/* XXX figure out some good default low power mode for desktop cards */
+	}
+
+default_mode:
+	/* add the default mode */
+	rdev->pm.power_state[state_index].type =
+		POWER_STATE_TYPE_DEFAULT;
+	rdev->pm.power_state[state_index].num_clock_modes = 1;
+	rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+	if ((state_index > 0) &&
+	    (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+		rdev->pm.power_state[state_index].clock_info[0].voltage =
+			rdev->pm.power_state[0].clock_info[0].voltage;
+	else
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.power_state[state_index].flags = 0;
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = state_index + 1;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	return;
+
+pm_failed:
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = 0;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+}
+
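+/* Program a known external DVO chip with a hardcoded init sequence over
+ * i2c (only the SiL164 is handled; a SiL1178 sequence is noted below but
+ * untested).  Typically used as a fallback when the table-driven setup
+ * in radeon_combios_external_tmds_setup() fails.
+ */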
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return;
+
+	switch (tmds->dvo_chip) {
+	case DVO_SIL164:
+		/* sil 164 */
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x08, 0x30);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x09, 0x00);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x0a, 0x90);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x0c, 0x89);
+		radeon_i2c_put_byte(tmds->i2c_bus,
+				    tmds->slave_addr,
+				    0x08, 0x3b);
+		break;
+	case DVO_SIL1178:
+		/* sil 1178 - untested */
+		/*
+		 * 0x0f, 0x44
+		 * 0x0f, 0x4c
+		 * 0x0e, 0x01
+		 * 0x0a, 0x80
+		 * 0x09, 0x30
+		 * 0x0c, 0xc9
+		 * 0x0d, 0x70
+		 * 0x08, 0x32
+		 * 0x08, 0x33
+		 */
+		break;
+	default:
+		break;
+	}
+}
+
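+/* Run the BIOS external TMDS init script.  Each entry starts with a
+ * 16-bit header: the top 3 bits select an opcode (mmio write, rmw,
+ * delay, i2c write, ...) and the low 13 bits carry a register index or
+ * i2c address; e.g. a header of 0x4008 means opcode 2 (rmw) on register
+ * 0x8 * 4 = 0x20.  Returns true if a table was found and executed.
+ */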
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint16_t offset;
+	uint8_t blocks, slave_addr, rev;
+	uint32_t index, id;
+	uint32_t reg, val, and_mask, or_mask;
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return false;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			if (rev > 1) {
+				blocks = RBIOS8(offset + 3);
+				index = offset + 4;
+				while (blocks > 0) {
+					id = RBIOS16(index);
+					index += 2;
+					switch (id >> 13) {
+					case 0:
+						reg = (id & 0x1fff) * 4;
+						val = RBIOS32(index);
+						index += 4;
+						WREG32(reg, val);
+						break;
+					case 2:
+						reg = (id & 0x1fff) * 4;
+						and_mask = RBIOS32(index);
+						index += 4;
+						or_mask = RBIOS32(index);
+						index += 4;
+						val = RREG32(reg);
+						val = (val & and_mask) | or_mask;
+						WREG32(reg, val);
+						break;
+					case 3:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val);
+						break;
+					case 4:
+						val = RBIOS16(index);
+						index += 2;
+						mdelay(val);
+						break;
+					case 6:
+						slave_addr = id & 0xff;
+						slave_addr >>= 1; /* 7 bit addressing */
+						index++;
+						reg = RBIOS8(index);
+						index++;
+						val = RBIOS8(index);
+						index++;
+						radeon_i2c_put_byte(tmds->i2c_bus,
+								    slave_addr,
+								    reg, val);
+						break;
+					default:
+						DRM_ERROR("Unknown id %d\n", id >> 13);
+						break;
+					}
+					blocks--;
+				}
+				return true;
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			index = offset + 10;
+			id = RBIOS16(index);
+			while (id != 0xffff) {
+				index += 2;
+				switch (id >> 13) {
+				case 0:
+					reg = (id & 0x1fff) * 4;
+					val = RBIOS32(index);
+					index += 4;
+					WREG32(reg, val);
+					break;
+				case 2:
+					reg = (id & 0x1fff) * 4;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32(reg, val);
+					break;
+				case 4:
+					val = RBIOS16(index);
+					index += 2;
+					udelay(val);
+					break;
+				case 5:
+					reg = id & 0x1fff;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32_PLL(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32_PLL(reg, val);
+					break;
+				case 6:
+					reg = id & 0x1fff;
+					val = RBIOS8(index);
+					index += 1;
+					radeon_i2c_put_byte(tmds->i2c_bus,
+							    tmds->slave_addr,
+							    reg, val);
+					break;
+				default:
+					DRM_ERROR("Unknown id %d\n", id >> 13);
+					break;
+				}
+				id = RBIOS16(index);
+			}
+			return true;
+		}
+	}
+	return false;
+}
+
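+/* Execute a BIOS mmio init script: a stream of 16-bit headers terminated
+ * by 0x0000.  cmds 0/1 write a dword, cmds 2/3 read-modify-write with an
+ * and/or mask pair, cmd 4 delays, and cmd 5 polls the MC for busy (addr
+ * 8) or idle (addr 9).
+ */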
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
+		while (RBIOS16(offset)) {
+			uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+			uint32_t addr = (RBIOS16(offset) & 0x1fff);
+			uint32_t val, and_mask, or_mask;
+			uint32_t tmp;
+
+			offset += 2;
+			switch (cmd) {
+			case 0:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32(addr, val);
+				break;
+			case 1:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32(addr, val);
+				break;
+			case 2:
+				and_mask = RBIOS32(offset);
+				offset += 4;
+				or_mask = RBIOS32(offset);
+				offset += 4;
+				tmp = RREG32(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32(addr, tmp);
+				break;
+			case 3:
+				and_mask = RBIOS32(offset);
+				offset += 4;
+				or_mask = RBIOS32(offset);
+				offset += 4;
+				tmp = RREG32(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32(addr, tmp);
+				break;
+			case 4:
+				val = RBIOS16(offset);
+				offset += 2;
+				udelay(val);
+				break;
+			case 5:
+				val = RBIOS16(offset);
+				offset += 2;
+				switch (addr) {
+				case 8:
+					while (val--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 9:
+					while (val--) {
+						if ((RREG32(RADEON_MC_STATUS) &
+						     RADEON_MC_IDLE))
+							break;
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
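+/* Execute a BIOS PLL init script: a stream of 8-bit headers terminated
+ * by 0x00.  cmd 0 writes a PLL dword, cmd 1 does a masked single-byte
+ * update, and cmds 2/3 delay or poll the MC/DLL (or apply the
+ * CG_NO1_DEBUG_0 workaround) depending on the address field.
+ */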
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
+		while (RBIOS8(offset)) {
+			uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+			uint8_t addr = (RBIOS8(offset) & 0x3f);
+			uint32_t val, shift, tmp;
+			uint32_t and_mask, or_mask;
+
+			offset++;
+			switch (cmd) {
+			case 0:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32_PLL(addr, val);
+				break;
+			case 1:
+				shift = RBIOS8(offset) * 8;
+				offset++;
+				and_mask = RBIOS8(offset) << shift;
+				and_mask |= ~(0xff << shift);
+				offset++;
+				or_mask = RBIOS8(offset) << shift;
+				offset++;
+				tmp = RREG32_PLL(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32_PLL(addr, tmp);
+				break;
+			case 2:
+			case 3:
+				tmp = 1000;
+				switch (addr) {
+				case 1:
+					udelay(150);
+					break;
+				case 2:
+					mdelay(1);
+					break;
+				case 3:
+					while (tmp--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 4:
+					while (tmp--) {
+						if (RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						    RADEON_DLL_READY)
+							break;
+					}
+					break;
+				case 5:
+					tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+					if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+						uint32_t mclk_cntl =
+						    RREG32_PLL(RADEON_MCLK_CNTL);
+						mclk_cntl &= 0xffff0000;
+						/*mclk_cntl |= 0x00001111;*//* ??? */
+						WREG32_PLL(RADEON_MCLK_CNTL,
+							   mclk_cntl);
+						mdelay(10);
+#endif
+						WREG32_PLL(RADEON_CLK_PWRMGT_CNTL,
+							   tmp & ~RADEON_CG_NO1_DEBUG_0);
+						mdelay(10);
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
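+/* Walk the BIOS ram reset table (terminated by 0xff): an entry of 0x0f
+ * waits for memory channel power-up to complete, anything else programs
+ * the SDRAM mode register with the given mode bits and reset value.
+ */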
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+					  uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	if (offset) {
+		uint8_t val = RBIOS8(offset);
+		while (val != 0xff) {
+			offset++;
+
+			if (val == 0x0f) {
+				uint32_t channel_complete_mask;
+
+				if (ASIC_IS_R300(rdev))
+					channel_complete_mask =
+					    R300_MEM_PWRUP_COMPLETE;
+				else
+					channel_complete_mask =
+					    RADEON_MEM_PWRUP_COMPLETE;
+				tmp = 20000;
+				while (tmp--) {
+					if ((RREG32(RADEON_MEM_STR_CNTL) &
+					     channel_complete_mask) ==
+					    channel_complete_mask)
+						break;
+				}
+			} else {
+				uint32_t or_mask = RBIOS16(offset);
+				offset += 2;
+
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_SDRAM_MODE_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+				or_mask = val << 24;
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_B3MEM_RESET_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+			}
+			val = RBIOS8(offset);
+		}
+	}
+}
+
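+/* Probe for usable memory: program the addressing mode, then write a
+ * test pattern at each megabyte boundary (top down) through the MM
+ * aperture and read it back.  Returns the verified size in MB, or 0 on
+ * the first mismatch.
+ */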
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+				   int mem_addr_mapping)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t mem_cntl;
+	uint32_t mem_size;
+	uint32_t addr = 0;
+
+	mem_cntl = RREG32(RADEON_MEM_CNTL);
+	if (mem_cntl & RV100_HALF_MODE)
+		ram /= 2;
+	mem_size = ram;
+	mem_cntl &= ~(0xff << 8);
+	mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+	WREG32(RADEON_MEM_CNTL, mem_cntl);
+	RREG32(RADEON_MEM_CNTL);
+
+	/* sdram reset ? */
+
+	/* something like this????  */
+	while (ram--) {
+		addr = ram * 1024 * 1024;
+		/* write to each page */
+		WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
+		/* read back and verify */
+		if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
+			return 0;
+	}
+
+	return mem_size;
+}
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t rev;
+	uint16_t offset;
+	uint32_t mem_size = 0;
+	uint32_t mem_cntl = 0;
+
+	/* should do something smarter here I guess... */
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	/* first check detected mem table */
+	offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+	if (offset) {
+		rev = RBIOS8(offset);
+		if (rev < 3) {
+			mem_cntl = RBIOS32(offset + 1);
+			mem_size = RBIOS16(offset + 5);
+			if ((rdev->family < CHIP_R200) &&
+			    !ASIC_IS_RN50(rdev))
+				WREG32(RADEON_MEM_CNTL, mem_cntl);
+		}
+	}
+
+	if (!mem_size) {
+		offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset - 1);
+			if (rev < 1) {
+				if ((rdev->family < CHIP_R200) &&
+				    !ASIC_IS_RN50(rdev)) {
+					int ram = 0;
+					int mem_addr_mapping = 0;
+
+					while (RBIOS8(offset)) {
+						ram = RBIOS8(offset);
+						mem_addr_mapping =
+						    RBIOS8(offset + 1);
+						if (mem_addr_mapping != 0x25)
+							ram *= 2;
+						mem_size =
+						    combios_detect_ram(dev, ram,
+								       mem_addr_mapping);
+						if (mem_size)
+							break;
+						offset += 2;
+					}
+				} else
+					mem_size = RBIOS8(offset);
+			} else {
+				mem_size = RBIOS8(offset);
+				mem_size *= 2;	/* convert to MB */
+			}
+		}
+	}
+
+	mem_size *= (1024 * 1024);	/* convert to bytes */
+	WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t table;
+
+	/* port hardcoded mac stuff from radeonfb */
+	if (rdev->bios == NULL)
+		return;
+
+	/* ASIC INIT 1 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	/* PLL INIT */
+	table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+
+	/* ASIC INIT 2 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		/* ASIC INIT 4 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* RAM RESET */
+		table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+		if (table)
+			combios_parse_ram_reset_table(dev, table);
+
+		/* ASIC INIT 3 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* write CONFIG_MEMSIZE */
+		combios_write_ram_size(dev);
+	}
+
+	/* quirk for rs4xx HP nx6125 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x308b)
+		return;
+
+	/* quirk for rs4xx HP dv5000 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x30a4)
+		return;
+
+	/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x30ae)
+		return;
+
+	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
+		return;
+
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS400 &&
+	    rdev->pdev->subsystem_vendor == 0x1179 &&
+	    rdev->pdev->subsystem_device == 0xff31)
+		return;
+
+	/* DYN CLK 1 */
+	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+}
+
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+	bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+	/* let the bios control the backlight */
+	bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+			   RADEON_ACC_MODE_CHANGE);
+
+	/* tell the bios a driver is loaded */
+	bios_7_scratch |= RADEON_DRV_LOADED;
+
+	WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock)
+		bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+	else
+		bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			/* fix me */
+			bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+			/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+			bios_5_scratch |= RADEON_TV1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_TV1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_4_scratch |= RADEON_LCD1_ATTACHED;
+			bios_5_scratch |= RADEON_LCD1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+			bios_5_scratch &= ~RADEON_LCD1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_4_scratch |= RADEON_DFP1_ATTACHED;
+			bios_5_scratch |= RADEON_DFP1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_4_scratch |= RADEON_DFP2_ATTACHED;
+			bios_5_scratch |= RADEON_DFP2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+		}
+	}
+	WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+	}
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_TV_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_CRT_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_LCD_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_DFP_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+	}
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 0000000..414642e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,2510 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_audio.h"
+#include "atom.h"
+
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+
+static int radeon_dp_handle_hpd(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_dp_mst_check_status(radeon_connector);
+	if (ret == -EINVAL)
+		return 1;
+	return 0;
+}
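+
+/* Called when a hotplug event is received: re-arms the hpd polarity and,
+ * for DP sinks that are still connected, retrains the link if needed.
+ */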
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		if (radeon_connector->is_mst_connector)
+			return;
+		if (dig_connector->is_mst) {
+			radeon_dp_handle_hpd(connector);
+			return;
+		}
+	}
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+		return;
+
+	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	/* if the connector is already off, don't turn it back on */
+	/* FIXME: This access isn't protected by any locks. */
+	if (connector->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	/* just deal with DP (not eDP) here. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		/* if existing sink type was not DP no need to retrain */
+		if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return;
+
+		/* first get sink type as it may be reset after (un)plug */
+		dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		/* don't do anything if sink is not display port, i.e.,
+		 * passive dp->(dvi|hdmi) adaptor
+		 */
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+		    radeon_dp_needs_link_train(radeon_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (!radeon_dp_getdpcd(radeon_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		}
+	}
+}
+
+static void radeon_property_change_mode(struct drm_encoder *encoder)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+
+	if (crtc && crtc->enabled) {
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y, crtc->primary->fb);
+	}
+}
+
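+/* Determine the effective bpc for a monitor: start from the EDID value
+ * (default 8) and clamp it against the asic generation, the sink's max
+ * TMDS clock for HDMI deep color, and the deep_color module parameter.
+ */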
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int bpc = 8;
+	int mode_clock, max_tmds_clock;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+				if (connector->display_info.bpc)
+					bpc = connector->display_info.bpc;
+			}
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+		    drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (connector->display_info.bpc)
+			bpc = connector->display_info.bpc;
+		else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			const struct drm_connector_helper_funcs *connector_funcs =
+				connector->helper_private;
+			struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+			if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+				bpc = 6;
+			else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+				bpc = 8;
+		}
+		break;
+	}
+
+	if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+		/* hdmi deep color only implemented on DCE4+ */
+		if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
+			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
+					  connector->name, bpc);
+			bpc = 8;
+		}
+
+		/*
+		 * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
+		 * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
+		 * 12 bpc is always supported on hdmi deep color sinks, as this is
+		 * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
+		 */
+		if (bpc > 12) {
+			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
+					  connector->name, bpc);
+			bpc = 12;
+		}
+
+		/* Any defined maximum tmds clock limit we must not exceed? */
+		if (connector->display_info.max_tmds_clock > 0) {
+			/* mode_clock is clock in kHz for mode to be modeset on this connector */
+			mode_clock = radeon_connector->pixelclock_for_modeset;
+
+			/* Maximum allowable input clock in kHz */
+			max_tmds_clock = connector->display_info.max_tmds_clock;
+
+			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
+					  connector->name, mode_clock, max_tmds_clock);
+
+			/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
+			if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
+				if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+					(mode_clock * 5/4 <= max_tmds_clock))
+					bpc = 10;
+				else
+					bpc = 8;
+
+				DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
+						  connector->name, bpc);
+			}
+
+			if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
+				bpc = 8;
+				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
+						  connector->name, bpc);
+			}
+		} else if (bpc > 8) {
+			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+					  connector->name);
+			bpc = 8;
+		}
+	}
+
+	if ((radeon_deep_color == 0) && (bpc > 8)) {
+		DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+				  connector->name);
+		bpc = 8;
+	}
+
+	DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
+			  connector->name, connector->display_info.bpc, bpc);
+
+	return bpc;
+}
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *best_encoder;
+	struct drm_encoder *encoder;
+	const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+	bool connected;
+	int i;
+
+	best_encoder = connector_funcs->best_encoder(connector);
+
+	drm_connector_for_each_possible_encoder(connector, encoder, i) {
+		if ((encoder == best_encoder) && (status == connector_status_connected))
+			connected = true;
+		else
+			connected = false;
+
+		if (rdev->is_atom_bios)
+			radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+		else
+			radeon_combios_connected_scratch_regs(connector, encoder, connected);
+	}
+}
+
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+{
+	struct drm_encoder *encoder;
+	int i;
+
+	drm_connector_for_each_possible_encoder(connector, encoder, i) {
+		if (encoder->encoder_type == encoder_type)
+			return encoder;
+	}
+
+	return NULL;
+}
+
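+/* Return the cached EDID for a connector, falling back to a copy of the
+ * connector's EDID property blob (caching it) when nothing was fetched
+ * over ddc yet.
+ */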
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
+	if (radeon_connector->edid) {
+		return radeon_connector->edid;
+	} else if (edid_blob) {
+		struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+		if (edid)
+			radeon_connector->edid = edid;
+	}
+	return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->edid)
+		return;
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+	     ENCODER_OBJECT_ID_NONE) &&
+	    radeon_connector->ddc_bus->has_aux) {
+		radeon_connector->edid = drm_get_edid(connector,
+						      &radeon_connector->ddc_bus->aux.ddc);
+	} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+		   (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+		    radeon_connector->ddc_bus->has_aux)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->aux.ddc);
+		else if (radeon_connector->ddc_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+	} else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC &&
+		   connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+		   radeon_connector->ddc_bus) {
+		radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base,
+								 &radeon_connector->ddc_bus->adapter);
+	} else if (radeon_connector->ddc_bus) {
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+						      &radeon_connector->ddc_bus->adapter);
+	}
+
+	if (!radeon_connector->edid) {
+		/* don't fetch the edid from the vbios if ddc fails and runpm is
+		 * enabled so we report disconnected.
+		 */
+		if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+			return;
+
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else {
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		}
+	}
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	if (radeon_connector->edid) {
+		drm_connector_update_edid_property(connector, radeon_connector->edid);
+		ret = drm_add_edid_modes(connector, radeon_connector->edid);
+		return ret;
+	}
+	drm_connector_update_edid_property(connector, NULL);
+	return 0;
+}
+
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	int i;
+
+	/* pick the first one */
+	drm_connector_for_each_possible_encoder(connector, encoder, i)
+		return encoder;
+
+	return NULL;
+}
+
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	struct radeon_encoder *radeon_encoder;
+
+	if (encoder == NULL)
+		return;
+
+	radeon_encoder = to_radeon_encoder(encoder);
+
+	if (!list_empty(&connector->probed_modes)) {
+		struct drm_display_mode *preferred_mode =
+			list_first_entry(&connector->probed_modes,
+					 struct drm_display_mode, head);
+
+		radeon_encoder->native_mode = *preferred_mode;
+	} else {
+		radeon_encoder->native_mode.clock = 0;
+	}
+}
+
+/*
+ * radeon_connector_analog_encoder_conflict_solve
+ * - search for other connectors sharing this encoder;
+ *   if priority is true, mark them disconnected when this one is connected;
+ *   if priority is false, mark this connector disconnected when they are.
+ */
+static enum drm_connector_status
+radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+					       struct drm_encoder *encoder,
+					       enum drm_connector_status current_status,
+					       bool priority)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_connector *conflict;
+	struct radeon_connector *radeon_conflict;
+
+	list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+		struct drm_encoder *enc;
+		int i;
+
+		if (conflict == connector)
+			continue;
+
+		radeon_conflict = to_radeon_connector(conflict);
+
+		drm_connector_for_each_possible_encoder(conflict, enc, i) {
+			/* if both connectors can drive this same encoder */
+			if (enc == encoder) {
+				if (conflict->status != connector_status_connected)
+					continue;
+
+				if (radeon_conflict->use_digital)
+					continue;
+
+				if (priority) {
+					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
+						      conflict->name);
+					DRM_DEBUG_KMS("in favor of %s\n",
+						      connector->name);
+					conflict->status = connector_status_disconnected;
+					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
+				} else {
+					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
+						      connector->name);
+					DRM_DEBUG_KMS("in favor of %s\n",
+						      conflict->name);
+					current_status = connector_status_disconnected;
+				}
+				break;
+			}
+		}
+	}
+	return current_status;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+	if (native_mode->hdisplay != 0 &&
+	    native_mode->vdisplay != 0 &&
+	    native_mode->clock != 0) {
+		mode = drm_mode_duplicate(dev, native_mode);
+		if (!mode)
+			return NULL;
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		drm_mode_set_name(mode);
+
+		DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
+	} else if (native_mode->hdisplay != 0 &&
+		   native_mode->vdisplay != 0) {
+		/* mac laptops without an edid */
+		/* Note that this is not necessarily the exact panel mode,
+		 * but an approximation based on the cvt formula.  For these
+		 * systems we should ideally read the mode info out of the
+		 * registers or add a mode table, but this works and is much
+		 * simpler.
+		 */
+		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+		if (!mode)
+			return NULL;
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
+	}
+	return mode;
+}
+
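+/* Add a list of common modes, skipping anything above 1024x768 on TV
+ * outputs and anything that would exceed (or duplicate) the native
+ * panel mode on LCDs.
+ */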
+static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+			if (common_modes[i].w > 1024 ||
+			    common_modes[i].h > 768)
+				continue;
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (common_modes[i].w > native_mode->hdisplay ||
+			    common_modes[i].h > native_mode->vdisplay ||
+			    (common_modes[i].w == native_mode->hdisplay &&
+			     common_modes[i].h == native_mode->vdisplay))
+				continue;
+		}
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		if (!mode)
+			continue;
+		drm_mode_probed_add(connector, mode);
+	}
+}
+
+static int radeon_connector_set_property(struct drm_connector *connector,
+					 struct drm_property *property,
+					 uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	if (property == rdev->mode_info.coherent_mode_property) {
+		struct radeon_encoder_atom_dig *dig;
+		bool new_coherent_mode;
+
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (!radeon_encoder->enc_priv)
+			return 0;
+
+		dig = radeon_encoder->enc_priv;
+		new_coherent_mode = val ? true : false;
+		if (dig->coherent_mode != new_coherent_mode) {
+			dig->coherent_mode = new_coherent_mode;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.audio_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->audio != val) {
+			radeon_connector->audio = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.dither_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->dither != val) {
+			radeon_connector->dither = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_type != val) {
+			radeon_encoder->underscan_type = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_hborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_hborder != val) {
+			radeon_encoder->underscan_hborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_vborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_vborder != val) {
+			radeon_encoder->underscan_vborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.tv_std_property) {
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
+		if (!encoder) {
+			encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC);
+		}
+
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (!radeon_encoder->enc_priv)
+			return 0;
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+			struct radeon_encoder_atom_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		} else {
+			struct radeon_encoder_tv_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	if (property == rdev->mode_info.load_detect_property) {
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+
+		radeon_connector->dac_load_detect = !!val;
+	}
+
+	if (property == rdev->mode_info.tmds_pll_property) {
+		struct radeon_encoder_int_tmds *tmds = NULL;
+		bool ret = false;
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		tmds = radeon_encoder->enc_priv;
+		if (!tmds)
+			return 0;
+
+		if (val == 0) {
+			if (rdev->is_atom_bios)
+				ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds);
+			else
+				ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
+		}
+		if (val == 1 || ret == false) {
+			radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		enum radeon_rmx_type rmx_type;
+
+		if (connector->encoder)
+			radeon_encoder = to_radeon_encoder(connector->encoder);
+		else {
+			const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+			radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+		}
+
+		switch (val) {
+		default:
+		case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+		case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+		case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+		case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+		}
+		if (radeon_encoder->rmx_type == rmx_type)
+			return 0;
+
+		if ((rmx_type != RMX_OFF) &&
+		    (radeon_encoder->native_mode.clock == 0))
+			return 0;
+
+		radeon_encoder->rmx_type = rmx_type;
+
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	if (property == rdev->mode_info.output_csc_property) {
+		if (connector->encoder)
+			radeon_encoder = to_radeon_encoder(connector->encoder);
+		else {
+			const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+			radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+		}
+
+		if (radeon_encoder->output_csc == val)
+			return 0;
+
+		radeon_encoder->output_csc = val;
+
+		if (connector->encoder && connector->encoder->crtc) {
+			struct drm_crtc *crtc  = connector->encoder->crtc;
+			struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+			radeon_crtc->output_csc = radeon_encoder->output_csc;
+
+			/*
+			 * Our .gamma_set assumes the .gamma_store has been
+			 * prefilled and don't care about its arguments.
+			 */
+			crtc->funcs->gamma_set(crtc, NULL, NULL, NULL, 0, NULL);
+		}
+	}
+
+	return 0;
+}
+
+static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
+					  struct drm_connector *connector)
+{
+	struct radeon_encoder *radeon_encoder =	to_radeon_encoder(encoder);
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	struct drm_display_mode *t, *mode;
+
+	/* If the EDID preferred mode doesn't match the native mode, use it */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay)
+				memcpy(native_mode, mode, sizeof(*mode));
+		}
+	}
+
+	/* Try to get native mode details from EDID if necessary */
+	if (!native_mode->clock) {
+		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+			if (mode->hdisplay == native_mode->hdisplay &&
+			    mode->vdisplay == native_mode->vdisplay) {
+				*native_mode = *mode;
+				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
+				break;
+			}
+		}
+	}
+
+	if (!native_mode->clock) {
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
+		radeon_encoder->rmx_type = RMX_OFF;
+	}
+}
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	int ret = 0;
+	struct drm_display_mode *mode;
+
+	radeon_connector_get_edid(connector);
+	ret = radeon_ddc_get_modes(connector);
+	if (ret > 0) {
+		encoder = radeon_best_single_encoder(connector);
+		if (encoder) {
+			radeon_fixup_lvds_native_mode(encoder, connector);
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
+		}
+		return ret;
+	}
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* we have no EDID modes */
+	mode = radeon_fp_native_mode(encoder);
+	if (mode) {
+		ret = 1;
+		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	}
+
+	return ret;
+}
+
+static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
+						   struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+	if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+		return MODE_PANEL;
+
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* AVIVO hardware supports downscaling modes larger than the panel
+		 * to the panel size, but I'm not sure this is desirable.
+		 */
+		if ((mode->hdisplay > native_mode->hdisplay) ||
+		    (mode->vdisplay > native_mode->vdisplay))
+			return MODE_PANEL;
+
+		/* if scaling is disabled, block non-native modes */
+		if (radeon_encoder->rmx_type == RMX_OFF) {
+			if ((mode->hdisplay != native_mode->hdisplay) ||
+			    (mode->vdisplay != native_mode->vdisplay))
+				return MODE_PANEL;
+		}
+	}
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		/* pm_runtime_get_sync() takes a reference even on failure */
+		r = pm_runtime_get_sync(connector->dev->dev);
+		if (r < 0) {
+			pm_runtime_put_autosuspend(connector->dev->dev);
+			return connector_status_disconnected;
+		}
+	}
+
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* check if panel is valid */
+		if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+			ret = connector_status_connected;
+		/* don't fetch the edid from the vbios if ddc fails and runpm is
+		 * enabled so we report disconnected.
+		 */
+		if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+			ret = connector_status_disconnected;
+	}
+
+	/* check for edid as well */
+	radeon_connector_get_edid(connector);
+	if (radeon_connector->edid)
+		ret = connector_status_connected;
+	/* check acpi lid status ??? */
+
+	radeon_connector_update_scratch_regs(connector, ret);
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		pm_runtime_mark_last_busy(connector->dev->dev);
+		pm_runtime_put_autosuspend(connector->dev->dev);
+	}
+
+	return ret;
+}
+
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
+		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+		radeon_connector->ddc_bus->has_aux = false;
+	}
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	radeon_connector_free_edid(connector);
+	kfree(radeon_connector->con_priv);
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int radeon_lvds_set_property(struct drm_connector *connector,
+				    struct drm_property *property,
+				    uint64_t value)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_encoder *radeon_encoder;
+	enum radeon_rmx_type rmx_type;
+
+	DRM_DEBUG_KMS("\n");
+	if (property != dev->mode_config.scaling_mode_property)
+		return 0;
+
+	if (connector->encoder)
+		radeon_encoder = to_radeon_encoder(connector->encoder);
+	else {
+		const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+		radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+	}
+
+	switch (value) {
+	case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+	case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+	case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+	}
+	if (radeon_encoder->rmx_type == rmx_type)
+		return 0;
+
+	radeon_encoder->rmx_type = rmx_type;
+
+	radeon_property_change_mode(&radeon_encoder->base);
+	return 0;
+}
+
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+	.get_modes = radeon_lvds_get_modes,
+	.mode_valid = radeon_lvds_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_lvds_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+	int ret;
+
+	radeon_connector_get_edid(connector);
+	ret = radeon_ddc_get_modes(connector);
+
+	radeon_get_native_mode(connector);
+
+	return ret;
+}
+
+static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* XXX check mode bandwidth */
+
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+	bool dret = false;
+	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		r = pm_runtime_get_sync(connector->dev->dev);
+		if (r < 0)
+			return connector_status_disconnected;
+	}
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+
+	if (radeon_connector->ddc_bus)
+		dret = radeon_ddc_probe(radeon_connector, false);
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		radeon_connector_free_edid(connector);
+		radeon_connector_get_edid(connector);
+
+		if (!radeon_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+					connector->name);
+			ret = connector_status_connected;
+		} else {
+			radeon_connector->use_digital =
+				!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+				radeon_connector_free_edid(connector);
+				ret = connector_status_disconnected;
+			} else {
+				ret = connector_status_connected;
+			}
+		}
+	} else {
+
+		/* if we aren't forcing, don't do destructive polling */
+		if (!force) {
+			/* only return the previous status if we last
+			 * detected a monitor via load.
+			 */
+			if (radeon_connector->detected_by_load)
+				ret = connector->status;
+			goto out;
+		}
+
+		if (radeon_connector->dac_load_detect && encoder) {
+			encoder_funcs = encoder->helper_private;
+			ret = encoder_funcs->detect(encoder, connector);
+			if (ret != connector_status_disconnected)
+				radeon_connector->detected_by_load = true;
+		}
+	}
+
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the CRT is connected and use that EDID.
+	 */
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		ret = connector_status_connected;
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+	if (!drm_kms_helper_is_poll_worker()) {
+		pm_runtime_mark_last_busy(connector->dev->dev);
+		pm_runtime_put_autosuspend(connector->dev->dev);
+	}
+
+	return ret;
+}
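+
+/*
+ * Note on the "destructive" path above: DAC load detection drives the
+ * analog output and senses the load on it, which can visibly disturb
+ * the picture (presumably why it is called destructive), so the
+ * unforced poll path only reuses a previous load-detect result instead
+ * of re-running it.
+ */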
+
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_vga_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static int radeon_tv_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *tv_mode;
+	struct drm_encoder *encoder;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* avivo chips can scale any mode */
+	if (rdev->family >= CHIP_RS600)
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	else {
+		/* only 800x600 is supported right now on pre-avivo chips */
+		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
+		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, tv_mode);
+	}
+	return 1;
+}
+
+static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+		return MODE_CLOCK_RANGE;
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_encoder *encoder;
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	if (!radeon_connector->dac_load_detect)
+		return ret;
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		r = pm_runtime_get_sync(connector->dev->dev);
+		if (r < 0)
+			return connector_status_disconnected;
+	}
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+	else {
+		encoder_funcs = encoder->helper_private;
+		ret = encoder_funcs->detect(encoder, connector);
+	}
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
+	radeon_connector_update_scratch_regs(connector, ret);
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		pm_runtime_mark_last_busy(connector->dev->dev);
+		pm_runtime_put_autosuspend(connector->dev->dev);
+	}
+
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+	.get_modes = radeon_tv_get_modes,
+	.mode_valid = radeon_tv_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_tv_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status status;
+
+	/* We only trust HPD on R600 and newer ASICs. */
+	if (rdev->family >= CHIP_R600
+	  && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+		if (connector->status == status)
+			return true;
+	}
+
+	return false;
+}
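+
+/*
+ * Typical use, as in the detect() callbacks below (sketch):
+ *
+ *	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ *		ret = connector->status;
+ *		goto exit;
+ *	}
+ */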
+
+/*
+ * DVI is complicated.
+ * Do a DDC probe; if the probe passes, get the full EDID so
+ * we can do analog/digital monitor detection at this point.
+ * If the monitor is an analog monitor, or we got no DDC,
+ * we need to find the DAC encoder object for this connector.
+ * If we got no DDC, we do load detection on the DAC encoder object.
+ * If we got analog DDC, or load detection passes on the DAC encoder,
+ * we have to check if this analog encoder is shared with anyone else
+ * (e.g. TV); if it's shared, we have to set the other connector to
+ * disconnected.
+ */
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = NULL;
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+	int r;
+	enum drm_connector_status ret = connector_status_disconnected;
+	bool dret = false, broken_edid = false;
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		r = pm_runtime_get_sync(connector->dev->dev);
+		if (r < 0)
+			return connector_status_disconnected;
+	}
+
+	if (radeon_connector->detected_hpd_without_ddc) {
+		force = true;
+		radeon_connector->detected_hpd_without_ddc = false;
+	}
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto exit;
+	}
+
+	if (radeon_connector->ddc_bus) {
+		dret = radeon_ddc_probe(radeon_connector, false);
+
+		/* Sometimes the pins required for the DDC probe on DVI
+		 * connectors don't make contact at the same time as the ones
+		 * for HPD do. If the DDC probe fails even though we had an HPD
+		 * signal, try again later. */
+		if (!dret && !force &&
+		    connector->status != connector_status_connected) {
+			DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+			radeon_connector->detected_hpd_without_ddc = true;
+			schedule_delayed_work(&rdev->hotplug_work,
+					      msecs_to_jiffies(1000));
+			goto exit;
+		}
+	}
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		radeon_connector_free_edid(connector);
+		radeon_connector_get_edid(connector);
+
+		if (!radeon_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+					connector->name);
+			/* rs690 seems to have a problem with connectors not existing and always
+			 * returning a block of 0's. If we see this, just stop polling on this output. */
+			if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+			    radeon_connector->base.null_edid_counter) {
+				ret = connector_status_disconnected;
+				DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
+					  connector->name);
+				radeon_connector->ddc_bus = NULL;
+			} else {
+				ret = connector_status_connected;
+				broken_edid = true; /* defer use_digital to later */
+			}
+		} else {
+			radeon_connector->use_digital =
+				!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+				radeon_connector_free_edid(connector);
+				ret = connector_status_disconnected;
+			} else {
+				ret = connector_status_connected;
+			}
+			/* This gets complicated.  We have boards with VGA + HDMI with a
+			 * shared DDC line and we have boards with DVI-D + HDMI with a shared
+			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
+			 * you don't really know what's connected to which port as both are digital.
+			 */
+			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+				struct drm_connector *list_connector;
+				struct radeon_connector *list_radeon_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_radeon_connector = to_radeon_connector(list_connector);
+					if (list_radeon_connector->shared_ddc &&
+					    (list_radeon_connector->ddc_bus->rec.i2c_id ==
+					     radeon_connector->ddc_bus->rec.i2c_id)) {
+						/* cases where both connectors are digital */
+						if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+							/* hpd is our only option in this case */
+							if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+								radeon_connector_free_edid(connector);
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
+		goto out;
+
+	/* DVI-D and HDMI-A are digital only */
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+		goto out;
+
+	/* if we aren't forcing, don't do destructive polling */
+	if (!force) {
+		/* only return the previous status if we last
+		 * detected a monitor via load.
+		 */
+		if (radeon_connector->detected_by_load)
+			ret = connector->status;
+		goto out;
+	}
+
+	/* find analog encoder */
+	if (radeon_connector->dac_load_detect) {
+		int i;
+
+		drm_connector_for_each_possible_encoder(connector, encoder, i) {
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+				continue;
+
+			encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->detect) {
+				if (!broken_edid) {
+					if (ret != connector_status_connected) {
+						/* deal with analog monitors without DDC */
+						ret = encoder_funcs->detect(encoder, connector);
+						if (ret == connector_status_connected) {
+							radeon_connector->use_digital = false;
+						}
+						if (ret != connector_status_disconnected)
+							radeon_connector->detected_by_load = true;
+					}
+				} else {
+					enum drm_connector_status lret;
+					/* assume digital unless load detected otherwise */
+					radeon_connector->use_digital = true;
+					lret = encoder_funcs->detect(encoder, connector);
+					DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
+					if (lret == connector_status_connected)
+						radeon_connector->use_digital = false;
+				}
+				break;
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
+	    encoder) {
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+	}
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the DFP is connected and use that EDID.  In most
+	 * cases the DVI port is actually a virtual KVM port connected to the service
+	 * processor.
+	 */
+out:
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		radeon_connector->use_digital = true;
+		ret = connector_status_connected;
+	}
+
+	/* updated in get modes as well since we need to know if it's analog or digital */
+	radeon_connector_update_scratch_regs(connector, ret);
+
+	if ((radeon_audio != 0) && radeon_connector->use_digital) {
+		const struct drm_connector_helper_funcs *connector_funcs =
+			connector->helper_private;
+
+		encoder = connector_funcs->best_encoder(connector);
+		if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+			radeon_connector_get_edid(connector);
+			radeon_audio_detect(connector, encoder, ret);
+		}
+	}
+
+exit:
+	if (!drm_kms_helper_is_poll_worker()) {
+		pm_runtime_mark_last_busy(connector->dev->dev);
+		pm_runtime_put_autosuspend(connector->dev->dev);
+	}
+
+	return ret;
+}
+
+/* Okay, need to be smart in here about which encoder to pick. */
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	int i;
+
+	drm_connector_for_each_possible_encoder(connector, encoder, i) {
+		if (radeon_connector->use_digital == true) {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+				return encoder;
+		} else {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+			    encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+				return encoder;
+		}
+	}
+
+	/* see if we have a default encoder TODO */
+
+	/* then check use digital */
+	/* pick the first one */
+	drm_connector_for_each_possible_encoder(connector, encoder, i)
+		return encoder;
+
+	return NULL;
+}
+
+static void radeon_dvi_force(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	if (connector->force == DRM_FORCE_ON)
+		radeon_connector->use_digital = false;
+	if (connector->force == DRM_FORCE_ON_DIGITAL)
+		radeon_connector->use_digital = true;
+}
+
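+/*
+ * Rough numbers behind the clock checks below (for orientation; the
+ * figures come from the DVI/HDMI specs, not from this driver):
+ * single-link TMDS tops out at 165 MHz, so e.g. 1920x1080@60
+ * (~148.5 MHz) fits on one link while 2560x1600@60 (~268 MHz) needs a
+ * dual-link connector, and HDMI 1.3+ sinks allow up to 340 MHz.
+ */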
+static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	/* XXX check mode bandwidth */
+
+	/* clocks over 135 MHz have heat issues with DVI on RV100 */
+	if (radeon_connector->use_digital &&
+	    (rdev->family == CHIP_RV100) &&
+	    (mode->clock > 135000))
+		return MODE_CLOCK_HIGH;
+
+	if (radeon_connector->use_digital && (mode->clock > 165000)) {
+		if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+			return MODE_OK;
+		else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+			/* HDMI 1.3+ supports max clock of 340 MHz */
+			if (mode->clock > 340000)
+				return MODE_CLOCK_HIGH;
+			else
+				return MODE_OK;
+		} else {
+			return MODE_CLOCK_HIGH;
+		}
+	}
+
+	/* check against the max pixel clock */
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_dvi_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dvi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int ret;
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_display_mode *mode;
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+			radeon_connector_get_edid(connector);
+			ret = radeon_ddc_get_modes(connector);
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+		} else {
+			/* need to setup ddc on the bridge */
+			if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			    ENCODER_OBJECT_ID_NONE) {
+				if (encoder)
+					radeon_atom_ext_encoder_setup_ddc(encoder);
+			}
+			radeon_connector_get_edid(connector);
+			ret = radeon_ddc_get_modes(connector);
+		}
+
+		if (ret > 0) {
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+
+		if (!encoder)
+			return 0;
+
+		/* we have no EDID modes */
+		mode = radeon_fp_native_mode(encoder);
+		if (mode) {
+			ret = 1;
+			drm_mode_probed_add(connector, mode);
+			/* add the width/height from vbios tables if available */
+			connector->display_info.width_mm = mode->width_mm;
+			connector->display_info.height_mm = mode->height_mm;
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
+		}
+	} else {
+		/* need to setup ddc on the bridge */
+		if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			ENCODER_OBJECT_ID_NONE) {
+			if (encoder)
+				radeon_atom_ext_encoder_setup_ddc(encoder);
+		}
+		radeon_connector_get_edid(connector);
+		ret = radeon_ddc_get_modes(connector);
+
+		radeon_get_native_mode(connector);
+	}
+
+	return ret;
+}
+
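+/*
+ * TRAVIS and NUTMEG are external DP bridge encoders (DP to LVDS and
+ * DP to VGA respectively, as far as the naming goes); return the id of
+ * the one routed to this connector, if any.
+ */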
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+
+	drm_connector_for_each_possible_encoder(connector, encoder, i) {
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			break;
+		}
+	}
+
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+	bool found = false;
+
+	drm_connector_for_each_possible_encoder(connector, encoder, i) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+			found = true;
+	}
+
+	return found;
+}
+
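+/*
+ * DP 1.2 (HBR2) needs an encoder with the HBR2 cap and a high enough
+ * display clock; assuming the usual 10 kHz units for default_dispclk,
+ * the 53900 below corresponds to 539 MHz.
+ */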
+bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE5(rdev) &&
+	    (rdev->clock.default_dispclk >= 53900) &&
+	    radeon_connector_encoder_is_hbr2(connector)) {
+		return true;
+	}
+
+	return false;
+}
+
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int r;
+
+	if (radeon_dig_connector->is_mst)
+		return connector_status_disconnected;
+
+	if (!drm_kms_helper_is_poll_worker()) {
+		r = pm_runtime_get_sync(connector->dev->dev);
+		if (r < 0)
+			return connector_status_disconnected;
+	}
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto out;
+	}
+
+	radeon_connector_free_edid(connector);
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* check if panel is valid */
+			if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+				ret = connector_status_connected;
+			/* don't fetch the edid from the vbios if ddc fails and runpm
+			 * is enabled; report disconnected instead.
+			 */
+			if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+				ret = connector_status_disconnected;
+		}
+		/* eDP is always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+		if (radeon_dp_getdpcd(radeon_connector))
+			ret = connector_status_connected;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+	} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+		   ENCODER_OBJECT_ID_NONE) {
+		/* DP bridges are always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		/* get the DPCD from the bridge */
+		radeon_dp_getdpcd(radeon_connector);
+
+		if (encoder) {
+			/* setup ddc on the bridge */
+			radeon_atom_ext_encoder_setup_ddc(encoder);
+			/* bridge chips are always aux */
+			if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+				ret = connector_status_connected;
+			else if (radeon_connector->dac_load_detect) { /* try load detection */
+				const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+				ret = encoder_funcs->detect(encoder, connector);
+			}
+		}
+	} else {
+		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+			ret = connector_status_connected;
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				radeon_dp_getdpcd(radeon_connector);
+				r = radeon_dp_mst_probe(radeon_connector);
+				if (r == 1)
+					ret = connector_status_disconnected;
+			}
+		} else {
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				if (radeon_dp_getdpcd(radeon_connector)) {
+					r = radeon_dp_mst_probe(radeon_connector);
+					if (r == 1)
+						ret = connector_status_disconnected;
+					else
+						ret = connector_status_connected;
+				}
+			} else {
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+				if (radeon_ddc_probe(radeon_connector, false))
+					ret = connector_status_connected;
+			}
+		}
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+
+	if ((radeon_audio != 0) && encoder) {
+		radeon_connector_get_edid(connector);
+		radeon_audio_detect(connector, encoder, ret);
+	}
+
+out:
+	if (!drm_kms_helper_is_poll_worker()) {
+		pm_runtime_mark_last_busy(connector->dev->dev);
+		pm_runtime_put_autosuspend(connector->dev->dev);
+	}
+
+	return ret;
+}
+
+static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+			return MODE_PANEL;
+
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* AVIVO hardware supports downscaling modes larger than the panel
+			 * to the panel size, but I'm not sure this is desirable.
+			 */
+			if ((mode->hdisplay > native_mode->hdisplay) ||
+			    (mode->vdisplay > native_mode->vdisplay))
+				return MODE_PANEL;
+
+			/* if scaling is disabled, block non-native modes */
+			if (radeon_encoder->rmx_type == RMX_OFF) {
+				if ((mode->hdisplay != native_mode->hdisplay) ||
+				    (mode->vdisplay != native_mode->vdisplay))
+					return MODE_PANEL;
+			}
+		}
+	} else {
+		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+			return radeon_dp_mode_valid_helper(connector, mode);
+		} else {
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+				/* HDMI 1.3+ supports max clock of 340 MHz */
+				if (mode->clock > 340000)
+					return MODE_CLOCK_HIGH;
+			} else {
+				if (mode->clock > 165000)
+					return MODE_CLOCK_HIGH;
+			}
+		}
+	}
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
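+
+/*
+ * The three func tables above all share the DP detect/force paths and
+ * differ only in set_property: plain DP connectors take the generic
+ * handler, while eDP and DP-to-LVDS bridge connectors take the LVDS
+ * scaling handler.
+ */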
+
+void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t subpixel_order = SubPixelNone;
+	bool shared_ddc = false;
+	bool is_dp_bridge = false;
+	bool has_aux = false;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0 don't try to add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+		if (radeon_connector->ddc_bus && i2c_bus->valid) {
+			if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+				radeon_connector->shared_ddc = true;
+				shared_ddc = true;
+			}
+			if (radeon_connector->router_bus && router->ddc_valid &&
+			    (radeon_connector->router.router_id == router->router_id)) {
+				radeon_connector->shared_ddc = false;
+				shared_ddc = false;
+			}
+		}
+	}
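+	/*
+	 * Note: two connectors probed on the same i2c bus are marked as
+	 * sharing DDC, unless a DDC router with the same router id sits in
+	 * front of them (presumably because the router muxes the bus per
+	 * connector, making the EDID unambiguous again).
+	 */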
+
+	/* check if it's a dp bridge */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->devices & supported_device) {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_TRAVIS:
+			case ENCODER_OBJECT_ID_NUTMEG:
+				is_dp_bridge = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->shared_ddc = shared_ddc;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	radeon_connector->router = *router;
+	if (router->ddc_valid || router->cd_valid) {
+		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+		if (!radeon_connector->router_bus)
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+	}
+
+	if (is_dp_bridge) {
+		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+		if (!radeon_dig_connector)
+			goto failed;
+		radeon_dig_connector->igp_lane_info = igp_lane_info;
+		radeon_connector->con_priv = radeon_dig_connector;
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (radeon_connector->ddc_bus)
+				has_aux = true;
+			else
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+		default:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_property,
+						      UNDERSCAN_OFF);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_hborder_property,
+						      0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_vborder_property,
+						      0);
+
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_NONE);
+
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+
+			if (radeon_audio != 0) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.audio_property,
+							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_lvds_bridge_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	} else {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   dev->mode_config.scaling_mode_property,
+							   DRM_MODE_SCALE_NONE);
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVIA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   dev->mode_config.scaling_mode_property,
+							   DRM_MODE_SCALE_NONE);
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   dev->mode_config.scaling_mode_property,
+							   DRM_MODE_SCALE_NONE);
+			}
+			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.audio_property,
+							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   dev->mode_config.scaling_mode_property,
+							   DRM_MODE_SCALE_NONE);
+			}
+			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.audio_property,
+							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (radeon_connector->ddc_bus)
+					has_aux = true;
+				else
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+				drm_object_attach_property(&radeon_connector->base.base,
+							   dev->mode_config.scaling_mode_property,
+							   DRM_MODE_SCALE_NONE);
+			}
+			if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.audio_property,
+							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
+			if (ASIC_IS_DCE5(rdev))
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.output_csc_property,
+							   RADEON_OUTPUT_CSC_BYPASS);
+			connector->interlace_allowed = true;
+			/* in theory with a DP to VGA converter... */
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_eDP:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (radeon_connector->ddc_bus)
+					has_aux = true;
+				else
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.tv_std_property,
+						      radeon_atombios_get_tv_info(rdev));
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid) {
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			                    DRM_CONNECTOR_POLL_DISCONNECT;
+		}
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->display_info.subpixel_order = subpixel_order;
+	drm_connector_register(connector);
+
+	if (has_aux)
+		radeon_dp_aux_init(radeon_connector);
+
+	return;
+
+failed:
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	uint32_t subpixel_order = SubPixelNone;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0 don't try to add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+	}
+
+	radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVIA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_DVII)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+		radeon_connector->dac_load_detect = true;
+		/* The RS400, RC410 and RS480 chipsets seem to report a lot
+		 * of false positives on load detect; we haven't yet
+		 * found a way to make load detect reliable on those
+		 * chipsets, so just disable it for TV.
+		 */
+		if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+			radeon_connector->dac_load_detect = false;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      radeon_connector->dac_load_detect);
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.tv_std_property,
+					      radeon_combios_get_tv_info(rdev));
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		drm_object_attach_property(&radeon_connector->base.base,
+					      dev->mode_config.scaling_mode_property,
+					      DRM_MODE_SCALE_FULLSCREEN);
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid) {
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			                    DRM_CONNECTOR_POLL_DISCONNECT;
+		}
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->display_info.subpixel_order = subpixel_order;
+	drm_connector_register(connector);
+}
+
+void radeon_setup_mst_connector(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	if (!ASIC_IS_DCE5(rdev))
+		return;
+
+	if (radeon_mst == 0)
+		return;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		int ret;
+
+		radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		ret = radeon_dp_mst_init(radeon_connector);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 0000000..1ae31db
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <linux/list_sort.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_trace.h"
+
+#define RADEON_CS_MAX_PRIORITY		32u
+#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
+
+/* This is based on bucket sort, which has O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+struct radeon_cs_buckets {
+	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
+};
+
+static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
+		INIT_LIST_HEAD(&b->bucket[i]);
+}
+
+static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
+				  struct list_head *item, unsigned priority)
+{
+	/* Since buffers which appear sooner in the relocation list are
+	 * likely to be used more often than buffers which appear later
+	 * in the list, the sort mustn't change the ordering of buffers
+	 * with the same priority, i.e. it must be stable.
+	 */
+	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
+}
+
+static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
+				       struct list_head *out_list)
+{
+	unsigned i;
+
+	/* Connect the sorted buckets in the output list. */
+	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
+		list_splice(&b->bucket[i], out_list);
+	}
+}
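+
+/*
+ * Usage sketch for the bucket sort above (illustrative only, kept out
+ * of the build; the item names are hypothetical).  Items that share a
+ * priority keep their insertion order, and higher priorities come first
+ * in the output list.
+ */
+#if 0
+static void radeon_cs_buckets_example(void)
+{
+	struct radeon_cs_buckets b;
+	struct list_head out, item_a, item_b, item_c;
+
+	radeon_cs_buckets_init(&b);
+	INIT_LIST_HEAD(&out);
+	radeon_cs_buckets_add(&b, &item_a, 5);	/* first prio-5 item */
+	radeon_cs_buckets_add(&b, &item_b, RADEON_CS_MAX_PRIORITY);
+	radeon_cs_buckets_add(&b, &item_c, 5);	/* second prio-5 item */
+	radeon_cs_buckets_get_list(&b, &out);
+	/* out now holds: item_b, item_a, item_c */
+}
+#endif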
+
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *chunk;
+	struct radeon_cs_buckets buckets;
+	unsigned i;
+	bool need_mmap_lock = false;
+	int r;
+
+	if (p->chunk_relocs == NULL) {
+		return 0;
+	}
+	chunk = p->chunk_relocs;
+	p->dma_reloc_idx = 0;
+	/* FIXME: we assume that each reloc uses 4 dwords */
+	p->nrelocs = chunk->length_dw / 4;
+	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+			GFP_KERNEL | __GFP_ZERO);
+	if (p->relocs == NULL) {
+		return -ENOMEM;
+	}
+
+	radeon_cs_buckets_init(&buckets);
+
+	for (i = 0; i < p->nrelocs; i++) {
+		struct drm_radeon_cs_reloc *r;
+		struct drm_gem_object *gobj;
+		unsigned priority;
+
+		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+		gobj = drm_gem_object_lookup(p->filp, r->handle);
+		if (gobj == NULL) {
+			DRM_ERROR("gem object lookup failed 0x%x\n",
+				  r->handle);
+			return -ENOENT;
+		}
+		p->relocs[i].robj = gem_to_radeon_bo(gobj);
+
+		/* The userspace buffer priorities are from 0 to 15. A higher
+		 * number means the buffer is more important.
+		 * Also, the buffers used for write have a higher priority than
+		 * the buffers used for read only, which doubles the range
+		 * to 0 to 31. 32 is reserved for the kernel driver.
+		 */
+		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+			   + !!r->write_domain;
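+		/* e.g. a read-only BO with userspace priority 7 maps to
+		 * 7 * 2 + 0 = 14, the same BO with a write domain to 15 */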
+
+		/* The first reloc of a UVD job is the msg and that must be in
+		 * VRAM, the second reloc is the DPB and for WMV that must be in
+		 * VRAM as well. Also put everything into VRAM on AGP cards and older
+		 * IGP chips to avoid image corruption.
+		 */
+		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
+						   PCI_CAP_ID_AGP) ||
+		     p->rdev->family == CHIP_RS780 ||
+		     p->rdev->family == CHIP_RS880)) {
+
+			/* TODO: is this still needed for NI+ ? */
+			p->relocs[i].preferred_domains =
+				RADEON_GEM_DOMAIN_VRAM;
+
+			p->relocs[i].allowed_domains =
+				RADEON_GEM_DOMAIN_VRAM;
+
+			/* prioritize this over any other relocation */
+			priority = RADEON_CS_MAX_PRIORITY;
+		} else {
+			uint32_t domain = r->write_domain ?
+				r->write_domain : r->read_domains;
+
+			if (domain & RADEON_GEM_DOMAIN_CPU) {
+				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+					  "for command submission\n");
+				return -EINVAL;
+			}
+
+			p->relocs[i].preferred_domains = domain;
+			if (domain == RADEON_GEM_DOMAIN_VRAM)
+				domain |= RADEON_GEM_DOMAIN_GTT;
+			p->relocs[i].allowed_domains = domain;
+		}
+
+		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+			uint32_t domain = p->relocs[i].preferred_domains;
+			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
+				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
+					  "allowed for userptr BOs\n");
+				return -EINVAL;
+			}
+			need_mmap_lock = true;
+			domain = RADEON_GEM_DOMAIN_GTT;
+			p->relocs[i].preferred_domains = domain;
+			p->relocs[i].allowed_domains = domain;
+		}
+
+		/* Objects shared as dma-bufs cannot be moved to VRAM */
+		if (p->relocs[i].robj->prime_shared_count) {
+			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+			if (!p->relocs[i].allowed_domains) {
+				DRM_ERROR("BO associated with dma-buf cannot "
+					  "be moved to VRAM\n");
+				return -EINVAL;
+			}
+		}
+
+		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].tv.shared = !r->write_domain;
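+		/* read-only relocs are marked shared so that the ring
+		 * syncing in radeon_cs_sync_rings() only has to wait on
+		 * each such BO's exclusive fence */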
+
+		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
+				      priority);
+	}
+
+	radeon_cs_buckets_get_list(&buckets, &p->validated);
+
+	if (p->cs_flags & RADEON_CS_USE_VM)
+		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
+					      &p->validated);
+	if (need_mmap_lock)
+		down_read(&current->mm->mmap_sem);
+
+	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+
+	if (need_mmap_lock)
+		up_read(&current->mm->mmap_sem);
+
+	return r;
+}
+
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+	p->priority = priority;
+
+	switch (ring) {
+	default:
+		DRM_ERROR("unknown ring id: %d\n", ring);
+		return -EINVAL;
+	case RADEON_CS_RING_GFX:
+		p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_COMPUTE:
+		if (p->rdev->family >= CHIP_TAHITI) {
+			if (p->priority > 0)
+				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
+		} else
+			p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_DMA:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			if (p->priority > 0)
+				p->ring = R600_RING_TYPE_DMA_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+		} else if (p->rdev->family >= CHIP_RV770) {
+			p->ring = R600_RING_TYPE_DMA_INDEX;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_CS_RING_UVD:
+		p->ring = R600_RING_TYPE_UVD_INDEX;
+		break;
+	case RADEON_CS_RING_VCE:
+		/* TODO: only use the low priority ring for now */
+		p->ring = TN_RING_TYPE_VCE1_INDEX;
+		break;
+	}
+	return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+	struct radeon_bo_list *reloc;
+	int r;
+
+	list_for_each_entry(reloc, &p->validated, tv.head) {
+		struct reservation_object *resv;
+
+		resv = reloc->robj->tbo.resv;
+		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+				     reloc->tv.shared);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+	struct drm_radeon_cs *cs = data;
+	uint64_t *chunk_array_ptr;
+	unsigned size, i;
+	u32 ring = RADEON_CS_RING_GFX;
+	s32 priority = 0;
+
+	INIT_LIST_HEAD(&p->validated);
+
+	if (!cs->num_chunks) {
+		return 0;
+	}
+
+	/* get chunks */
+	p->idx = 0;
+	p->ib.sa_bo = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->chunk_ib = NULL;
+	p->chunk_relocs = NULL;
+	p->chunk_flags = NULL;
+	p->chunk_const_ib = NULL;
+	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (p->chunks_array == NULL) {
+		return -ENOMEM;
+	}
+	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+	if (copy_from_user(p->chunks_array, chunk_array_ptr,
+			       sizeof(uint64_t)*cs->num_chunks)) {
+		return -EFAULT;
+	}
+	p->cs_flags = 0;
+	p->nchunks = cs->num_chunks;
+	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+	if (p->chunks == NULL) {
+		return -ENOMEM;
+	}
+	for (i = 0; i < p->nchunks; i++) {
+		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+		struct drm_radeon_cs_chunk user_chunk;
+		uint32_t __user *cdata;
+
+		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
+		if (copy_from_user(&user_chunk, chunk_ptr,
+				       sizeof(struct drm_radeon_cs_chunk))) {
+			return -EFAULT;
+		}
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs = &p->chunks[i];
+		}
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib = &p->chunks[i];
+			/* zero length IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib = &p->chunks[i];
+			/* zero length CONST IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags = &p->chunks[i];
+			/* zero length flags aren't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+
+		size = p->chunks[i].length_dw;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		p->chunks[i].user_ptr = cdata;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
+			continue;
+
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+				continue;
+		}
+
+		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+		size *= sizeof(uint32_t);
+		if (p->chunks[i].kdata == NULL) {
+			return -ENOMEM;
+		}
+		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
+			return -EFAULT;
+		}
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->cs_flags = p->chunks[i].kdata[0];
+			if (p->chunks[i].length_dw > 1)
+				ring = p->chunks[i].kdata[1];
+			if (p->chunks[i].length_dw > 2)
+				priority = (s32)p->chunks[i].kdata[2];
+		}
+	}
+
+	/* these are KMS only */
+	if (p->rdev) {
+		if ((p->cs_flags & RADEON_CS_USE_VM) &&
+		    !p->rdev->vm_manager.enabled) {
+			DRM_ERROR("VM not active on asic!\n");
+			return -EINVAL;
+		}
+
+		if (radeon_cs_get_ring(p, ring, priority))
+			return -EINVAL;
+
+		/* we only support VM on some SI+ rings */
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cmp_size_smaller_first(void *priv, struct list_head *a,
+				  struct list_head *b)
+{
+	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
+
+	/* Sort A before B if A is smaller. */
+	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+}
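+
+/*
+ * list_sort() is stable and orders ascending when the comparator
+ * returns a negative value for a-before-b; e.g. BOs of 8, 2, 2 and 16
+ * pages sort to 2, 2, 8, 16 with the two 2-page BOs keeping their
+ * relative order.
+ */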
+
+/**
+ * radeon_cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ * @backoff:	indicator to backoff the reservation
+ *
+ * If error is set, then unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
+{
+	unsigned i;
+
+	if (!error) {
+		/* Sort the buffer list from the smallest to largest buffer,
+		 * which affects the order of buffers in the LRU list.
+		 * This assures that the smallest buffers are added first
+		 * to the LRU list, so they are likely to be later evicted
+		 * first, instead of large buffers whose eviction is more
+		 * expensive.
+		 *
+		 * This slightly lowers the number of bytes moved by TTM
+		 * per frame under memory pressure.
+		 */
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+					    &parser->validated,
+					    &parser->ib.fence->base);
+	} else if (backoff) {
+		ttm_eu_backoff_reservation(&parser->ticket,
+					   &parser->validated);
+	}
+
+	if (parser->relocs != NULL) {
+		for (i = 0; i < parser->nrelocs; i++) {
+			struct radeon_bo *bo = parser->relocs[i].robj;
+			if (bo == NULL)
+				continue;
+
+			drm_gem_object_put_unlocked(&bo->gem_base);
+		}
+	}
+	kfree(parser->track);
+	kvfree(parser->relocs);
+	kvfree(parser->vm_bos);
+	for (i = 0; i < parser->nchunks; i++)
+		kvfree(parser->chunks[i].kdata);
+	kfree(parser->chunks);
+	kfree(parser->chunks_array);
+	radeon_ib_free(parser->rdev, &parser->ib);
+	radeon_ib_free(parser->rdev, &parser->const_ib);
+}
+
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+			      struct radeon_cs_parser *parser)
+{
+	int r;
+
+	if (parser->chunk_ib == NULL)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM)
+		return 0;
+
+	r = radeon_cs_parse(rdev, parser->ring, parser);
+	if (r || parser->parser_error) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		return r;
+	}
+
+	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+		radeon_uvd_note_usage(rdev);
+	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
+		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
+		radeon_vce_note_usage(rdev);
+
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+	if (r) {
+		DRM_ERROR("Failed to schedule IB !\n");
+	}
+	return r;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
+				   struct radeon_vm *vm)
+{
+	struct radeon_device *rdev = p->rdev;
+	struct radeon_bo_va *bo_va;
+	int i, r;
+
+	r = radeon_vm_update_page_directory(rdev, vm);
+	if (r)
+		return r;
+
+	r = radeon_vm_clear_freed(rdev, vm);
+	if (r)
+		return r;
+
+	if (vm->ib_bo_va == NULL) {
+		DRM_ERROR("Tmp BO not in VM!\n");
+		return -EINVAL;
+	}
+
+	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
+				&rdev->ring_tmp_bo.bo->tbo.mem);
+	if (r)
+		return r;
+
+	for (i = 0; i < p->nrelocs; i++) {
+		struct radeon_bo *bo;
+
+		bo = p->relocs[i].robj;
+		bo_va = radeon_vm_bo_find(vm, bo);
+		if (bo_va == NULL) {
+			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+			return -EINVAL;
+		}
+
+		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+		if (r)
+			return r;
+
+		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
+	}
+
+	return radeon_vm_clear_invalids(rdev, vm);
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+				 struct radeon_cs_parser *parser)
+{
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	int r;
+
+	if (parser->chunk_ib == NULL)
+		return 0;
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+		return 0;
+
+	if (parser->const_ib.length_dw) {
+		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
+	if (r) {
+		return r;
+	}
+
+	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+		radeon_uvd_note_usage(rdev);
+
+	mutex_lock(&vm->mutex);
+	r = radeon_bo_vm_update_pte(parser, vm);
+	if (r) {
+		goto out;
+	}
+
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		goto out;
+	}
+
+	if ((rdev->family >= CHIP_TAHITI) &&
+	    (parser->chunk_const_ib != NULL)) {
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
+	} else {
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+	}
+
+out:
+	mutex_unlock(&vm->mutex);
+	return r;
+}
+
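+/* A -EDEADLK from the submission path means a GPU lockup was detected;
+ * after a successful reset the ioctl returns -EAGAIN so that userspace
+ * can resubmit the command stream.
+ */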
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_vm *vm = NULL;
+	int r;
+
+	if (parser->chunk_ib == NULL)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM) {
+		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+		vm = &fpriv->vm;
+
+		if ((rdev->family >= CHIP_TAHITI) &&
+		    (parser->chunk_const_ib != NULL)) {
+			ib_chunk = parser->chunk_const_ib;
+			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+				return -EINVAL;
+			}
+			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+					   vm, ib_chunk->length_dw * 4);
+			if (r) {
+				DRM_ERROR("Failed to get const ib !\n");
+				return r;
+			}
+			parser->const_ib.is_const_ib = true;
+			parser->const_ib.length_dw = ib_chunk->length_dw;
+			if (copy_from_user(parser->const_ib.ptr,
+					       ib_chunk->user_ptr,
+					       ib_chunk->length_dw * 4))
+				return -EFAULT;
+		}
+
+		ib_chunk = parser->chunk_ib;
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+	}
+	ib_chunk = parser->chunk_ib;
+
+	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	if (ib_chunk->kdata)
+		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+		return -EFAULT;
+	return 0;
+}
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_cs_parser parser;
+	int r;
+
+	down_read(&rdev->exclusive_lock);
+	if (!rdev->accel_working) {
+		up_read(&rdev->exclusive_lock);
+		return -EBUSY;
+	}
+	if (rdev->in_reset) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+		return r;
+	}
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+	parser.filp = filp;
+	parser.rdev = rdev;
+	parser.dev = rdev->dev;
+	parser.family = rdev->family;
+	r = radeon_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		radeon_cs_parser_fini(&parser, r, false);
+		up_read(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+
+	r = radeon_cs_ib_fill(rdev, &parser);
+	if (!r) {
+		r = radeon_cs_parser_relocs(&parser);
+		if (r && r != -ERESTARTSYS)
+			DRM_ERROR("Failed to parse relocation %d!\n", r);
+	}
+
+	if (r) {
+		radeon_cs_parser_fini(&parser, r, false);
+		up_read(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+
+	trace_radeon_cs(&parser);
+
+	r = radeon_cs_ib_chunk(rdev, &parser);
+	if (r)
+		goto out;
+
+	r = radeon_cs_ib_vm_chunk(rdev, &parser);
+	if (r)
+		goto out;
+out:
+	radeon_cs_parser_fini(&parser, r, true);
+	up_read(&rdev->exclusive_lock);
+	r = radeon_cs_handle_lockup(rdev, r);
+	return r;
+}
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser:	parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ *
+ * Assume that p->chunk_ib is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size, or if the packet
+ * is unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+	struct radeon_device *rdev = p->rdev;
+	uint32_t header;
+	int ret = 0, i;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	header = radeon_get_ib_value(p, idx);
+	pkt->idx = idx;
+	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+	pkt->one_reg_wr = 0;
+	switch (pkt->type) {
+	case RADEON_PACKET_TYPE0:
+		if (rdev->family < CHIP_R600) {
+			pkt->reg = R100_CP_PACKET0_GET_REG(header);
+			pkt->one_reg_wr =
+				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+		} else
+			pkt->reg = R600_CP_PACKET0_GET_REG(header);
+		break;
+	case RADEON_PACKET_TYPE3:
+		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+		break;
+	case RADEON_PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		ret = -EINVAL;
+		goto dump_ib;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		ret = -EINVAL;
+		goto dump_ib;
+	}
+	return 0;
+
+dump_ib:
+	for (i = 0; i < ib_chunk->length_dw; i++) {
+		if (i == idx)
+			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+		else
+			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+	}
+	return ret;
+}
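+
+/*
+ * Example (illustrative, assuming the usual PM4 header layout: type in
+ * bits 31:30, count in bits 29:16, type-3 opcode in bits 15:8): the
+ * header 0xC0001000 parses as a type-3 NOP with count 0, i.e. one
+ * payload dword follows, which is how relocation indices are carried
+ * (see radeon_cs_packet_next_reloc() below).
+ */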
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is a NOP relocation packet3.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return false;
+	if (p3reloc.type != RADEON_PACKET_TYPE3)
+		return false;
+	if (p3reloc.opcode != RADEON_PACKET3_NOP)
+		return false;
+	return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p:		structure holding the parser context.
+ * @pkt:	structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt)
+{
+	volatile uint32_t *ib;
+	unsigned i;
+	unsigned idx;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx;
+	for (i = 0; i <= (pkt->count + 1); i++, idx++)
+		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:	reloc information
+ * @nomm:	no memory management for debugging
+ *
+ * Check if the next packet is a relocation packet3; if so, resolve the
+ * BO it refers to and return it through @cs_reloc.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_bo_list **cs_reloc,
+				int nomm)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs == NULL) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = p->chunk_relocs;
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return r;
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+	    p3reloc.opcode != RADEON_PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
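+	/* idx is a dword offset into the relocation chunk; with 4 dwords
+	 * per entry, idx / 4 recovers the index into p->relocs */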
+	if (nomm) {
+		*cs_reloc = p->relocs;
+		(*cs_reloc)->gpu_offset =
+			(u64)relocs_chunk->kdata[idx + 3] << 32;
+		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
+	} else
+		*cs_reloc = &p->relocs[(idx / 4)];
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 0000000..9195227
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	uint32_t cur_lock;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+		WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+		WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else {
+		cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= RADEON_CUR_LOCK;
+		else
+			cur_lock &= ~RADEON_CUR_LOCK;
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+	}
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		u32 reg;
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			reg = RADEON_CRTC_GEN_CNTL;
+			break;
+		case 1:
+			reg = RADEON_CRTC2_GEN_CNTL;
+			break;
+		default:
+			return;
+		}
+		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
+	}
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (radeon_crtc->cursor_out_of_bounds)
+		return;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(radeon_crtc->cursor_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
+		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+		}
+
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
+		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+			break;
+		case 1:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+			break;
+		default:
+			return;
+		}
+
+		WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+					  (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+			 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+	}
+}
+
+static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	int w = radeon_crtc->cursor_width;
+
+	radeon_crtc->cursor_x = x;
+	radeon_crtc->cursor_y = y;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* avivo cursors are offset into the total surface */
+		x += crtc->x;
+		y += crtc->y;
+	}
+
+	if (x < 0)
+		xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
+	if (y < 0)
+		yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		x += crtc->x;
+		y += crtc->y;
+	}
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	/* fixed on DCE6 and newer */
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+		int i = 0;
+		struct drm_crtc *crtc_p;
+
+		/*
+		 * an avivo cursor image can't end on a 128 pixel boundary or
+		 * go past the end of the frame if both crtcs are enabled
+		 *
+		 * NOTE: It is safe to access crtc->enabled of other crtcs
+		 * without holding either the mode_config lock or the other
+		 * crtc's lock as long as write access to this flag _always_
+		 * grabs all locks.
+		 */
+		list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+			if (crtc_p->enabled)
+				i++;
+		}
+		if (i > 1) {
+			int cursor_end, frame_end;
+
+			cursor_end = x + w;
+			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+			if (cursor_end >= frame_end) {
+				w = w - (cursor_end - frame_end);
+				if (!(frame_end & 0x7f))
+					w--;
+			} else if (cursor_end <= 0) {
+				goto out_of_bounds;
+			} else if (!(cursor_end & 0x7f)) {
+				w--;
+			}
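+			/* e.g. x = 200, w = 64: cursor_end = 264 against
+			 * frame_end = 256 trims w to 56, and since 256 is a
+			 * multiple of 128 it is trimmed once more to 55 */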
+			if (w <= 0) {
+				goto out_of_bounds;
+			}
+		}
+	}
+
+	if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
+	    x >= (crtc->x + crtc->mode.hdisplay) ||
+	    y >= (crtc->y + crtc->mode.vdisplay))
+		goto out_of_bounds;
+
+	x += xorigin;
+	y += yorigin;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else {
+		x -= crtc->x;
+		y -= crtc->y;
+
+		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+			y *= 2;
+
+		WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (xorigin << 16)
+			| yorigin));
+		WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (x << 16)
+			| y));
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+		       yorigin * 256);
+	}
+
+	if (radeon_crtc->cursor_out_of_bounds) {
+		radeon_crtc->cursor_out_of_bounds = false;
+		if (radeon_crtc->cursor_bo)
+			radeon_show_cursor(crtc);
+	}
+
+	return 0;
+
+ out_of_bounds:
+	if (!radeon_crtc->cursor_out_of_bounds) {
+		radeon_hide_cursor(crtc);
+		radeon_crtc->cursor_out_of_bounds = true;
+	}
+	return 0;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+			    int x, int y)
+{
+	int ret;
+
+	radeon_lock_cursor(crtc, true);
+	ret = radeon_cursor_move_locked(crtc, x, y);
+	radeon_lock_cursor(crtc, false);
+
+	return ret;
+}
+
+int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+			    struct drm_file *file_priv,
+			    uint32_t handle,
+			    uint32_t width,
+			    uint32_t height,
+			    int32_t hot_x,
+			    int32_t hot_y)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
+	int ret;
+
+	if (!handle) {
+		/* turn off cursor */
+		radeon_hide_cursor(crtc);
+		obj = NULL;
+		goto unpin;
+	}
+
+	if ((width > radeon_crtc->max_cursor_width) ||
+	    (height > radeon_crtc->max_cursor_height)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (ret != 0) {
+		drm_gem_object_put_unlocked(obj);
+		return ret;
+	}
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &radeon_crtc->cursor_addr);
+	radeon_bo_unreserve(robj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_put_unlocked(obj);
+		return ret;
+	}
+
+	radeon_lock_cursor(crtc, true);
+
+	if (width != radeon_crtc->cursor_width ||
+	    height != radeon_crtc->cursor_height ||
+	    hot_x != radeon_crtc->cursor_hot_x ||
+	    hot_y != radeon_crtc->cursor_hot_y) {
+		int x, y;
+
+		x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+		y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+		radeon_crtc->cursor_width = width;
+		radeon_crtc->cursor_height = height;
+		radeon_crtc->cursor_hot_x = hot_x;
+		radeon_crtc->cursor_hot_y = hot_y;
+
+		radeon_cursor_move_locked(crtc, x, y);
+	}
+
+	radeon_show_cursor(crtc);
+
+	radeon_lock_cursor(crtc, false);
+
+unpin:
+	if (radeon_crtc->cursor_bo) {
+		struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+		ret = radeon_bo_reserve(robj, false);
+		if (likely(ret == 0)) {
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
+		}
+		drm_gem_object_put_unlocked(radeon_crtc->cursor_bo);
+	}
+
+	radeon_crtc->cursor_bo = obj;
+	return 0;
+}
+
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	if (radeon_crtc->cursor_bo) {
+		radeon_lock_cursor(crtc, true);
+
+		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+					  radeon_crtc->cursor_y);
+
+		radeon_show_cursor(crtc);
+
+		radeon_lock_cursor(crtc, false);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 0000000..59c8a66
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,1924 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/console.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_cache.h>
+#include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+static const char radeon_family_name[][16] = {
+	"R100",
+	"RV100",
+	"RS100",
+	"RV200",
+	"RS200",
+	"R200",
+	"RV250",
+	"RS300",
+	"RV280",
+	"R300",
+	"R350",
+	"RV350",
+	"RV380",
+	"R420",
+	"R423",
+	"RV410",
+	"RS400",
+	"RS480",
+	"RS600",
+	"RS690",
+	"RS740",
+	"RV515",
+	"R520",
+	"RV530",
+	"RV560",
+	"RV570",
+	"R580",
+	"R600",
+	"RV610",
+	"RV630",
+	"RV670",
+	"RV620",
+	"RV635",
+	"RS780",
+	"RS880",
+	"RV770",
+	"RV730",
+	"RV710",
+	"RV740",
+	"CEDAR",
+	"REDWOOD",
+	"JUNIPER",
+	"CYPRESS",
+	"HEMLOCK",
+	"PALM",
+	"SUMO",
+	"SUMO2",
+	"BARTS",
+	"TURKS",
+	"CAICOS",
+	"CAYMAN",
+	"ARUBA",
+	"TAHITI",
+	"PITCAIRN",
+	"VERDE",
+	"OLAND",
+	"HAINAN",
+	"BONAIRE",
+	"KAVERI",
+	"KABINI",
+	"HAWAII",
+	"MULLINS",
+	"LAST",
+};
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
+#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
+
+struct radeon_px_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 px_quirk_flags;
+};
+
+static struct radeon_px_quirk radeon_px_quirk_list[] = {
+	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
+	{ 0, 0, 0, 0, 0 },
+};
+
+bool radeon_is_px(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->flags & RADEON_IS_PX)
+		return true;
+	return false;
+}
+
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+	struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+	/* Apply PX quirks */
+	while (p && p->chip_device != 0) {
+		if (rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			rdev->px_quirk_flags = p->px_quirk_flags;
+			break;
+		}
+		++p;
+	}
+
+	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+		rdev->flags &= ~RADEON_IS_PX;
+
+	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+	if (!radeon_is_atpx_hybrid() &&
+	    !radeon_has_atpx_dgpu_power_cntl())
+		rdev->flags &= ~RADEON_IS_PX;
+}
+
+/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+				      const u32 *registers,
+				      const u32 array_size)
+{
+	u32 tmp, reg, and_mask, or_mask;
+	int i;
+
+	if (array_size % 3)
+		return;
+
+	for (i = 0; i < array_size; i += 3) {
+		reg = registers[i + 0];
+		and_mask = registers[i + 1];
+		or_mask = registers[i + 2];
+
+		if (and_mask == 0xffffffff) {
+			tmp = or_mask;
+		} else {
+			tmp = RREG32(reg);
+			tmp &= ~and_mask;
+			tmp |= or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+}
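+
+/*
+ * Example (hypothetical values): the triplet { 0x9508, 0x0000ffff,
+ * 0x00000001 } reads register 0x9508, clears its low 16 bits and sets
+ * bit 0; an and_mask of 0xffffffff skips the read and writes or_mask
+ * directly.
+ */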
+
+void radeon_pci_config_reset(struct radeon_device *rdev)
+{
+	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
+}
+
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
+ */
+void radeon_surface_init(struct radeon_device *rdev)
+{
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R600) {
+		int i;
+
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
+		}
+		/* enable surfaces */
+		WREG32(RADEON_SURFACE_CNTL, 0);
+	}
+}
+
+/*
+ * GPU scratch register helper functions.
+ */
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
+void radeon_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R300) {
+		rdev->scratch.num_reg = 5;
+	} else {
+		rdev->scratch.num_reg = 7;
+	}
+	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.free[i]) {
+			rdev->scratch.free[i] = false;
+			*reg = rdev->scratch.reg[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.reg[i] == reg) {
+			rdev->scratch.free[i] = true;
+			return;
+		}
+	}
+}
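+
+/*
+ * Typical usage sketch (illustrative; mirrors what the ring tests do):
+ * seed a scratch register, let the CP overwrite it, then poll.
+ *
+ *	uint32_t scratch;
+ *
+ *	if (radeon_scratch_get(rdev, &scratch))
+ *		return -EINVAL;
+ *	WREG32(scratch, 0xCAFEDEAD);
+ *	... emit a CP write of 0xDEADBEEF to scratch and wait ...
+ *	if (RREG32(scratch) == 0xDEADBEEF)
+ *		... the ring is alive ...
+ *	radeon_scratch_free(rdev, scratch);
+ */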
+
+/*
+ * GPU doorbell aperture helper functions.
+ */
+/**
+ * radeon_doorbell_init - Init doorbell driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_doorbell_init(struct radeon_device *rdev)
+{
+	/* doorbell bar mapping */
+	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
+	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
+
+	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+	if (rdev->doorbell.num_doorbells == 0)
+		return -EINVAL;
+
+	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
+	if (rdev->doorbell.ptr == NULL) {
+		return -ENOMEM;
+	}
+	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
+	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
+
+	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
+
+	return 0;
+}
+
+/**
+ * radeon_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+static void radeon_doorbell_fini(struct radeon_device *rdev)
+{
+	iounmap(rdev->doorbell.ptr);
+	rdev->doorbell.ptr = NULL;
+}
+
+/**
+ * radeon_doorbell_get - Allocate a doorbell entry
+ *
+ * @rdev: radeon_device pointer
+ * @doorbell: doorbell index
+ *
+ * Allocate a doorbell for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
+{
+	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+	if (offset < rdev->doorbell.num_doorbells) {
+		__set_bit(offset, rdev->doorbell.used);
+		*doorbell = offset;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+/**
+ * radeon_doorbell_free - Free a doorbell entry
+ *
+ * @rdev: radeon_device pointer
+ * @doorbell: doorbell index
+ *
+ * Free a doorbell allocated for use by the driver (all asics)
+ */
+void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
+{
+	if (doorbell < rdev->doorbell.num_doorbells)
+		__clear_bit(doorbell, rdev->doorbell.used);
+}
+
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics).  Used for suspend.
+ */
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+	rdev->wb.enabled = false;
+}
+
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+	radeon_wb_disable(rdev);
+	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
+		radeon_bo_unref(&rdev->wb.wb_obj);
+		rdev->wb.wb = NULL;
+		rdev->wb.wb_obj = NULL;
+	}
+}
+
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes writeback and allocates writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or a negative error code on failure.
+ */
+int radeon_wb_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->wb.wb_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     &rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+	}
+
+	/* clear wb memory */
+	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
+	/* disable event_write fences */
+	rdev->wb.use_event = false;
+	/* disabled via module param */
+	if (radeon_no_wb == 1) {
+		rdev->wb.enabled = false;
+	} else {
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* often unreliable on AGP */
+			rdev->wb.enabled = false;
+		} else if (rdev->family < CHIP_R300) {
+			/* often unreliable on pre-r300 */
+			rdev->wb.enabled = false;
+		} else {
+			rdev->wb.enabled = true;
+			/* event_write fences are only available on r600+ */
+			if (rdev->family >= CHIP_R600) {
+				rdev->wb.use_event = true;
+			}
+		}
+	}
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
+		rdev->wb.enabled = true;
+		rdev->wb.use_event = true;
+	}
+
+	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+	return 0;
+}
+
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as parameter (which is so far either the PCI aperture address or,
+ * for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us to have
+ * room for all the VRAM, then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function will never fail; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on AGP platforms.
+ *
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size as on some boards we need to program the mc to
+ * cover the whole aperture even if VRAM size is smaller than the aperture
+ * size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
+ * of Ubuntu ones).
+ *
+ * Note: the IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
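+ *
+ * Example (hypothetical numbers): with base = 0xE0000000 and a 32-bit
+ * mc_mask, only 512M of address space remains above base, so a 1G
+ * mc_vram_size does not fit; both mc_vram_size and real_vram_size are
+ * then clamped to the PCI aperture size (say 256M) and vram_end becomes
+ * base + 256M - 1.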
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
+	mc->vram_start = base;
+	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
+	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+			mc->mc_vram_size >> 20, mc->vram_start,
+			mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to place GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function will never fail.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
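+ *
+ * Example (hypothetical numbers): with VRAM at 0x0 - 0x0FFFFFFF (256M),
+ * a 32-bit mc_mask and a gtt_base_align of 0, size_bf is 0 and size_af
+ * is about 3.75G, so a 512M GTT is placed right after VRAM at 0x10000000.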
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_af, size_bf;
+
+	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
+		}
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+	} else {
+		if (mc->gtt_size > size_af) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+}
+
+/*
+ * GPU helper functions.
+ */
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
+#endif
+}
+
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
+bool radeon_card_posted(struct radeon_device *rdev)
+{
+	uint32_t reg;
+
+	/* for pass through, always force asic_init for CI */
+	if (rdev->family >= CHIP_BONAIRE &&
+	    radeon_device_is_virtual())
+		return false;
+
+	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+	if (efi_enabled(EFI_BOOT) &&
+	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (rdev->family < CHIP_R600))
+		return false;
+
+	if (ASIC_IS_NODCE(rdev))
+		goto check_memsize;
+
+	/* first check CRTCs */
+	if (ASIC_IS_DCE4(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		if (rdev->num_crtc >= 4) {
+			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		}
+		if (rdev->num_crtc >= 6) {
+			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		}
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+		      RREG32(AVIVO_D2CRTC_CONTROL);
+		if (reg & AVIVO_CRTC_EN) {
+			return true;
+		}
+	} else {
+		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+		      RREG32(RADEON_CRTC2_GEN_CNTL);
+		if (reg & RADEON_CRTC_EN) {
+			return true;
+		}
+	}
+
+check_memsize:
+	/* then check MEM_SIZE, in case the crtcs are off */
+	if (rdev->family >= CHIP_R600)
+		reg = RREG32(R600_CONFIG_MEMSIZE);
+	else
+		reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+	if (reg)
+		return true;
+
+	return false;
+}
+
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * params are used to calculate display watermarks (all asics)
+ */
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+	fixed20_12 a;
+	u32 sclk = rdev->pm.current_sclk;
+	u32 mclk = rdev->pm.current_mclk;
+
+	/* sclk/mclk are in units of 10 kHz; convert to MHz (20.12 fixed point) */
+	a.full = dfixed_const(100);
+	rdev->pm.sclk.full = dfixed_const(sclk);
+	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+	rdev->pm.mclk.full = dfixed_const(mclk);
+	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		a.full = dfixed_const(16);
+		/* core_bandwidth = sclk (MHz) * 16 */
+		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+	}
+}
+
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for GART entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_dummy_page_init(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.page)
+		return 0;
+	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+	if (rdev->dummy_page.page == NULL)
+		return -ENOMEM;
+	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
+					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
+		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+		__free_page(rdev->dummy_page.page);
+		rdev->dummy_page.page = NULL;
+		return -ENOMEM;
+	}
+	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+							    RADEON_GART_PAGE_DUMMY);
+	return 0;
+}
+
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+void radeon_dummy_page_fini(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.page == NULL)
+		return;
+	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
+			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(rdev->dummy_page.page);
+	rdev->dummy_page.page = NULL;
+}
+
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios.  The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
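+
+/*
+ * Note on the accessors below: the interpreter hands them register
+ * offsets in dword units, so the MMIO and IO callbacks multiply by 4
+ * to form a byte offset; e.g. cail_reg_read(info, 0x10) reads the
+ * MMIO register at byte offset 0x40.
+ */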
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->pll_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->pll_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->mc_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->mc_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32(reg*4, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32(reg*4);
+	return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32_IO(reg*4, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32_IO(reg*4);
+	return r;
+}
+
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+	struct card_info *atom_card_info =
+	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+	if (!atom_card_info)
+		return -ENOMEM;
+
+	rdev->mode_info.atom_card_info = atom_card_info;
+	atom_card_info->dev = rdev->ddev;
+	atom_card_info->reg_read = cail_reg_read;
+	atom_card_info->reg_write = cail_reg_write;
+	/* needed for iio ops */
+	if (rdev->rio_mem) {
+		atom_card_info->ioreg_read = cail_ioreg_read;
+		atom_card_info->ioreg_write = cail_ioreg_write;
+	} else {
+		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+		atom_card_info->ioreg_read = cail_reg_read;
+		atom_card_info->ioreg_write = cail_reg_write;
+	}
+	atom_card_info->mc_read = cail_mc_read;
+	atom_card_info->mc_write = cail_mc_write;
+	atom_card_info->pll_read = cail_pll_read;
+	atom_card_info->pll_write = cail_pll_write;
+
+	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+	if (!rdev->mode_info.atom_context) {
+		radeon_atombios_fini(rdev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&rdev->mode_info.atom_context->mutex);
+	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
+	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
+	return 0;
+}
+
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+	}
+	kfree(rdev->mode_info.atom_context);
+	rdev->mode_info.atom_context = NULL;
+	kfree(rdev->mode_info.atom_card_info);
+	rdev->mode_info.atom_card_info = NULL;
+}
+
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser.  See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
+int radeon_combios_init(struct radeon_device *rdev)
+{
+	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+	return 0;
+}
+
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+static unsigned int radeon_vga_set_decode(void *cookie, bool state)
+{
+	struct radeon_device *rdev = cookie;
+	radeon_vga_set_state(rdev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
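+	/*
+	 * Worked example: 64 & 63 == 0b1000000 & 0b0111111 == 0, so 64
+	 * passes; 48 & 47 == 0b110000 & 0b101111 == 0b100000 != 0, so 48
+	 * is rejected. Note that 0 also passes this test.
+	 */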
+	return (arg & (arg - 1)) == 0;
+}
+
+/**
+ * radeon_gart_size_auto - determine a sensible default GART size
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+	/* default to a larger gart size on newer asics */
+	if (family >= CHIP_TAHITI)
+		return 2048;
+	else if (family >= CHIP_RV770)
+		return 1024;
+	else
+		return 512;
+}
+
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+static void radeon_check_arguments(struct radeon_device *rdev)
+{
+	/* vramlimit must be a power of two */
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
+		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+				radeon_vram_limit);
+		radeon_vram_limit = 0;
+	}
+
+	if (radeon_gart_size == -1) {
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
+	}
+	/* gtt size must be power of two and greater or equal to 32M */
+	if (radeon_gart_size < 32) {
+		dev_warn(rdev->dev, "gart size (%d) too small\n",
+				radeon_gart_size);
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
+		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+				radeon_gart_size);
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
+	}
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
+	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
+	switch (radeon_agpmode) {
+	case -1:
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		dev_warn(rdev->dev, "invalid AGP mode %d (valid modes: "
+				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+		radeon_agpmode = 0;
+		break;
+	}
+
+	if (!radeon_check_pot_argument(radeon_vm_size)) {
+		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4;
+	}
+
+	if (radeon_vm_size < 1) {
+		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4;
+	}
+
+	/*
+	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
+	 */
+	if (radeon_vm_size > 1024) {
+		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4;
+	}
+
+	/* defines number of bits in page table versus page directory,
+	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+	 * page table and the remaining bits are in the page directory */
+	if (radeon_vm_block_size == -1) {
+
+		/* Total bits covered by PD + PTs */
+		unsigned bits = ilog2(radeon_vm_size) + 18;
+
+		/* Make sure the PD is 4K in size up to an 8GB address space.
+		 * Above that, split equally between PD and PTs */
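+		/*
+		 * Worked example: radeon_vm_size = 8 (GiB) gives
+		 * bits = ilog2(8) + 18 = 21 and block_size = 21 - 9 = 12,
+		 * leaving 9 bits of PD (512 entries, one 4K page); for
+		 * radeon_vm_size = 64, bits = 24 and block_size = 13.
+		 */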
+		if (radeon_vm_size <= 8)
+			radeon_vm_block_size = bits - 9;
+		else
+			radeon_vm_block_size = (bits + 3) / 2;
+
+	} else if (radeon_vm_block_size < 9) {
+		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
+			 radeon_vm_block_size);
+		radeon_vm_block_size = 9;
+	}
+
+	if (radeon_vm_block_size > 24 ||
+	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
+			 radeon_vm_block_size);
+		radeon_vm_block_size = 9;
+	}
+}
+
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga_switcheroo state
+ *
+ * Callback for the switcheroo driver.  Suspends or resumes
+ * the asic before or after it is powered up using ACPI methods.
+ */
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+		return;
+
+	if (state == VGA_SWITCHEROO_ON) {
+		pr_info("radeon: switched on\n");
+		/* don't suspend or resume card normally */
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+		radeon_resume_kms(dev, true, true);
+
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+		drm_kms_helper_poll_enable(dev);
+	} else {
+		pr_info("radeon: switched off\n");
+		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		radeon_suspend_kms(dev, true, true, false);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver.  Checks if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	/*
+	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
+	 * locking inversion with the driver load path. And the access here is
+	 * completely racy anyway. So don't bother with locking for now.
+	 */
+	return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+	.set_gpu_state = radeon_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = radeon_switcheroo_can_switch,
+};
+
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags)
+{
+	int r, i;
+	int dma_bits;
+	bool runtime = false;
+
+	rdev->shutdown = false;
+	rdev->dev = &pdev->dev;
+	rdev->ddev = ddev;
+	rdev->pdev = pdev;
+	rdev->flags = flags;
+	rdev->family = flags & RADEON_FAMILY_MASK;
+	rdev->is_atom_bios = false;
+	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+	rdev->mc.gtt_size = 512 * 1024 * 1024;
+	rdev->accel_working = false;
+	/* set up ring ids */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		rdev->ring[i].idx = i;
+	}
+	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
+
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+
+	/* mutex initialization is all done here so we
+	 * can call these functions again without locking issues */
+	mutex_init(&rdev->ring_lock);
+	mutex_init(&rdev->dc_hw_i2c_mutex);
+	atomic_set(&rdev->ih.lock, 0);
+	mutex_init(&rdev->gem.mutex);
+	mutex_init(&rdev->pm.mutex);
+	mutex_init(&rdev->gpu_clock_mutex);
+	mutex_init(&rdev->srbm_mutex);
+	init_rwsem(&rdev->pm.mclk_lock);
+	init_rwsem(&rdev->exclusive_lock);
+	init_waitqueue_head(&rdev->irq.vblank_queue);
+	mutex_init(&rdev->mn_lock);
+	hash_init(rdev->mn_hash);
+	r = radeon_gem_init(rdev);
+	if (r)
+		return r;
+
+	radeon_check_arguments(rdev);
+	/* Adjust VM size here.
+	 * Max GPUVM size for cayman+ is 40 bits.
+	 */
+	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
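+	/* radeon_vm_size is in GiB; a GiB of address space holds
+	 * 1 << 18 pages of 4 KiB, hence the shift by 18 above. */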
+
+	/* Set asic functions */
+	r = radeon_asic_init(rdev);
+	if (r)
+		return r;
+
+	/* All of the newer IGP chips have an internal gart.
+	 * However, some rs4xx report as AGP, so remove that flag here.
+	 */
+	if ((rdev->family >= CHIP_RS400) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		rdev->flags &= ~RADEON_IS_AGP;
+	}
+
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+		radeon_agp_disable(rdev);
+	}
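+	/* An agpmode of -1 asks for AGP to be disabled in favour of the
+	 * PCI(E) GART, which is what radeon_agp_disable() arranges. */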
+
+	/* Set the internal MC address mask
+	 * This is the max address of the GPU's
+	 * internal address space.
+	 */
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+	else if (rdev->family >= CHIP_CEDAR)
+		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+	else
+		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
+
+	/* set DMA mask + need_dma32 flags.
+	 * PCIE - can handle 40-bits.
+	 * IGP - can handle 40-bits
+	 * AGP - generally dma32 is safest
+	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+	 */
+	rdev->need_dma32 = false;
+	if (rdev->flags & RADEON_IS_AGP)
+		rdev->need_dma32 = true;
+	if ((rdev->flags & RADEON_IS_PCI) &&
+	    (rdev->family <= CHIP_RS740))
+		rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+	if (rdev->family == CHIP_CEDAR)
+		rdev->need_dma32 = true;
+#endif
+
+	dma_bits = rdev->need_dma32 ? 32 : 40;
+	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		rdev->need_dma32 = true;
+		dma_bits = 32;
+		pr_warn("radeon: No suitable DMA available\n");
+	}
+	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+		pr_warn("radeon: No coherent DMA available\n");
+	}
+	rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
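+	/* Bounce through swiotlb when system memory extends beyond what
+	 * the selected DMA mask can address directly. */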
+
+	/* Registers mapping */
+	/* TODO: block userspace mapping of io register */
+	spin_lock_init(&rdev->mmio_idx_lock);
+	spin_lock_init(&rdev->smc_idx_lock);
+	spin_lock_init(&rdev->pll_idx_lock);
+	spin_lock_init(&rdev->mc_idx_lock);
+	spin_lock_init(&rdev->pcie_idx_lock);
+	spin_lock_init(&rdev->pciep_idx_lock);
+	spin_lock_init(&rdev->pif_idx_lock);
+	spin_lock_init(&rdev->cg_idx_lock);
+	spin_lock_init(&rdev->uvd_idx_lock);
+	spin_lock_init(&rdev->rcu_idx_lock);
+	spin_lock_init(&rdev->didt_idx_lock);
+	spin_lock_init(&rdev->end_idx_lock);
+	if (rdev->family >= CHIP_BONAIRE) {
+		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
+		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
+	} else {
+		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
+	}
+	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
+	if (rdev->rmmio == NULL)
+		return -ENOMEM;
+
+	/* doorbell bar mapping */
+	if (rdev->family >= CHIP_BONAIRE)
+		radeon_doorbell_init(rdev);
+
+	/* io port mapping */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
+			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
+			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
+			break;
+		}
+	}
+	if (rdev->rio_mem == NULL)
+		DRM_ERROR("Unable to find PCI I/O BAR\n");
+
+	if (rdev->flags & RADEON_IS_PX)
+		radeon_device_handle_px_quirks(rdev);
+
+	/* if we have more than one VGA card, disable the radeon VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+
+	if (rdev->flags & RADEON_IS_PX)
+		runtime = true;
+	if (!pci_is_thunderbolt_attached(rdev->pdev))
+		vga_switcheroo_register_client(rdev->pdev,
+					       &radeon_switcheroo_ops, runtime);
+	if (runtime)
+		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
+
+	r = radeon_init(rdev);
+	if (r)
+		goto failed;
+
+	r = radeon_gem_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+	}
+
+	r = radeon_mst_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
+	}
+
+	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+		/* Acceleration is not working on this AGP card; try again
+		 * with a fallback to PCI or PCIE GART
+		 */
+		radeon_asic_reset(rdev);
+		radeon_fini(rdev);
+		radeon_agp_disable(rdev);
+		r = radeon_init(rdev);
+		if (r)
+			goto failed;
+	}
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	/*
+	 * Turks/Thames GPUs will freeze the whole laptop if DPM is not
+	 * restarted after the CP ring has chewed on at least one packet.
+	 * Hence we stop and restart DPM after the radeon_ib_ring_tests().
+	 */
+	if (rdev->pm.dpm_enabled &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
+	    (rdev->family == CHIP_TURKS) &&
+	    (rdev->flags & RADEON_IS_MOBILITY)) {
+		mutex_lock(&rdev->pm.mutex);
+		radeon_dpm_disable(rdev);
+		radeon_dpm_enable(rdev);
+		mutex_unlock(&rdev->pm.mutex);
+	}
+
+	if ((radeon_testing & 1)) {
+		if (rdev->accel_working)
+			radeon_test_moves(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
+	}
+	if ((radeon_testing & 2)) {
+		if (rdev->accel_working)
+			radeon_test_syncing(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
+	}
+	if (radeon_benchmarking) {
+		if (rdev->accel_working)
+			radeon_benchmark(rdev, radeon_benchmarking);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
+	}
+	return 0;
+
+failed:
+	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+	if (radeon_is_px(ddev))
+		pm_runtime_put_noidle(ddev->dev);
+	if (runtime)
+		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+	return r;
+}
+
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
+void radeon_device_fini(struct radeon_device *rdev)
+{
+	DRM_INFO("radeon: finishing device.\n");
+	rdev->shutdown = true;
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+	radeon_fini(rdev);
+	if (!pci_is_thunderbolt_attached(rdev->pdev))
+		vga_switcheroo_unregister_client(rdev->pdev);
+	if (rdev->flags & RADEON_IS_PX)
+		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+	vga_client_register(rdev->pdev, NULL, NULL, NULL);
+	if (rdev->rio_mem)
+		pci_iounmap(rdev->pdev, rdev->rio_mem);
+	rdev->rio_mem = NULL;
+	iounmap(rdev->rmmio);
+	rdev->rmmio = NULL;
+	if (rdev->family >= CHIP_BONAIRE)
+		radeon_doorbell_fini(rdev);
+}
+
+
+/*
+ * Suspend & resume.
+ */
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @suspend: put the pci device into a low power state
+ * @fbcon: also suspend the fbdev console
+ * @freeze: hibernation freeze; reset the asic instead of powering it down
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+		       bool fbcon, bool freeze)
+{
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	int i, r;
+
+	if (dev == NULL || dev->dev_private == NULL) {
+		return -ENODEV;
+	}
+
+	rdev = dev->dev_private;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	drm_kms_helper_poll_disable(dev);
+
+	drm_modeset_lock_all(dev);
+	/* turn off display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
+	drm_modeset_unlock_all(dev);
+
+	/* unpin the front buffers and cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+		struct drm_framebuffer *fb = crtc->primary->fb;
+		struct radeon_bo *robj;
+
+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+
+		if (fb == NULL || fb->obj[0] == NULL) {
+			continue;
+		}
+		robj = gem_to_radeon_bo(fb->obj[0]);
+		/* don't unpin kernel fb objects */
+		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	/* wait for gpu to finish processing current batch */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		r = radeon_fence_wait_empty(rdev, i);
+		if (r) {
+			/* delay GPU reset to resume */
+			radeon_fence_driver_force_completion(rdev, i);
+		}
+	}
+
+	radeon_save_bios_scratch_regs(rdev);
+
+	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
+	/* evict remaining vram memory
+	 * This second call to evict vram is to evict the gart page table
+	 * using the CPU.
+	 */
+	radeon_bo_evict_vram(rdev);
+
+	radeon_agp_suspend(rdev);
+
+	pci_save_state(dev->pdev);
+	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
+		rdev->asic->asic_reset(rdev, true);
+		pci_restore_state(dev->pdev);
+	} else if (suspend) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+
+	if (fbcon) {
+		console_lock();
+		radeon_fbdev_set_suspend(rdev, 1);
+		console_unlock();
+	}
+	return 0;
+}
+
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ * @resume: power the pci device back on and restore its state
+ * @fbcon: also resume the fbdev console
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+{
+	struct drm_connector *connector;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_crtc *crtc;
+	int r;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	if (fbcon) {
+		console_lock();
+	}
+	if (resume) {
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		if (pci_enable_device(dev->pdev)) {
+			if (fbcon)
+				console_unlock();
+			return -1;
+		}
+	}
+	/* resume AGP if in use */
+	radeon_agp_resume(rdev);
+	radeon_resume(rdev);
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		/* do dpm late init */
+		r = radeon_pm_late_init(rdev);
+		if (r) {
+			rdev->pm.dpm_enabled = false;
+			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+		}
+	} else {
+		/* resume old pm late */
+		radeon_pm_resume(rdev);
+	}
+
+	radeon_restore_bios_scratch_regs(rdev);
+
+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				/* Only 27 bit offset for legacy cursor */
+				r = radeon_bo_pin_restricted(robj,
+							     RADEON_GEM_DOMAIN_VRAM,
+							     ASIC_IS_AVIVO(rdev) ?
+							     0 : 1 << 27,
+							     &radeon_crtc->cursor_addr);
+				if (r != 0)
+					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
+	}
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
+	/* blat the mode back in */
+	if (fbcon) {
+		drm_helper_resume_force_mode(dev);
+		/* turn on display hw */
+		drm_modeset_lock_all(dev);
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		}
+		drm_modeset_unlock_all(dev);
+	}
+
+	drm_kms_helper_poll_enable(dev);
+
+	/* set the power state here in case we are a PX system or headless */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		radeon_pm_compute_clocks(rdev);
+
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempts to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+	unsigned ring_sizes[RADEON_NUM_RINGS];
+	uint32_t *ring_data[RADEON_NUM_RINGS];
+	bool saved = false;
+	int i, r;
+	int resched;
+
+	down_write(&rdev->exclusive_lock);
+
+	if (!rdev->needs_reset) {
+		up_write(&rdev->exclusive_lock);
+		return 0;
+	}
+
+	atomic_inc(&rdev->gpu_reset_counter);
+
+	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+						   &ring_data[i]);
+		if (ring_sizes[i]) {
+			saved = true;
+			dev_info(rdev->dev, "Saved %d dwords of commands "
+				 "on ring %d.\n", ring_sizes[i], i);
+		}
+	}
+
+	r = radeon_asic_reset(rdev);
+	if (!r) {
+		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
+		radeon_resume(rdev);
+	}
+
+	radeon_restore_bios_scratch_regs(rdev);
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!r && ring_data[i]) {
+			radeon_ring_restore(rdev, &rdev->ring[i],
+					    ring_sizes[i], ring_data[i]);
+		} else {
+			radeon_fence_driver_force_completion(rdev, i);
+			kfree(ring_data[i]);
+		}
+	}
+
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		/* do dpm late init */
+		r = radeon_pm_late_init(rdev);
+		if (r) {
+			rdev->pm.dpm_enabled = false;
+			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+		}
+	} else {
+		/* resume old pm late */
+		radeon_pm_resume(rdev);
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
+	}
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
+
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+
+	rdev->in_reset = true;
+	rdev->needs_reset = false;
+
+	downgrade_write(&rdev->exclusive_lock);
+
+	drm_helper_resume_force_mode(rdev->ddev);
+
+	/* set the power state here in case we are a PX system or headless */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		radeon_pm_compute_clocks(rdev);
+
+	if (!r) {
+		r = radeon_ib_ring_tests(rdev);
+		if (r && saved)
+			r = -EAGAIN;
+	} else {
+		/* bad news, how do we tell userspace? */
+		dev_info(rdev->dev, "GPU reset failed\n");
+	}
+
+	rdev->needs_reset = r == -EAGAIN;
+	rdev->in_reset = false;
+
+	up_read(&rdev->exclusive_lock);
+	return r;
+}
+
+
+/*
+ * Debugfs
+ */
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->debugfs_count; i++) {
+		if (rdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = rdev->debugfs_count + 1;
+	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we increase "
+			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	rdev->debugfs[rdev->debugfs_count].files = files;
+	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+	rdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 rdev->ddev->primary->debugfs_root,
+				 rdev->ddev->primary);
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 0000000..9d3ac8b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,1976 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include <asm/div64.h>
+
+#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_edid.h>
+
+#include <linux/gcd.h>
+
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u16 *r, *g, *b;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
+	r = crtc->gamma_store;
+	g = r + crtc->gamma_size;
+	b = g + crtc->gamma_size;
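+	/*
+	 * Pack each 16-bit gamma component into a 10:10:10 LUT word:
+	 * (*r & 0xffc0) keeps the top 10 bits in positions 15:6 and the
+	 * shift by 14 moves them to 29:20; green lands in 19:10 and blue
+	 * (>> 6) in 9:0. The DCE4, DCE5 and legacy LUT loaders below use
+	 * the same packing.
+	 */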
+	for (i = 0; i < 256; i++) {
+		WREG32(AVIVO_DC_LUT_30_COLOR,
+		       ((*r++ & 0xffc0) << 14) |
+		       ((*g++ & 0xffc0) << 4) |
+		       (*b++ >> 6));
+	}
+
+	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
+	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
+}
+
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u16 *r, *g, *b;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	r = crtc->gamma_store;
+	g = r + crtc->gamma_size;
+	b = g + crtc->gamma_size;
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       ((*r++ & 0xffc0) << 14) |
+		       ((*g++ & 0xffc0) << 4) |
+		       (*b++ >> 6));
+	}
+}
+
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u16 *r, *g, *b;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	r = crtc->gamma_store;
+	g = r + crtc->gamma_size;
+	b = g + crtc->gamma_size;
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       ((*r++ & 0xffc0) << 14) |
+		       ((*g++ & 0xffc0) << 4) |
+		       (*b++ >> 6));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+	if (ASIC_IS_DCE8(rdev)) {
+		/* XXX this only needs to be programmed once per crtc at startup,
+		 * not sure where the best place for it is
+		 */
+		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
+		       CIK_CURSOR_ALPHA_BLND_ENA);
+	}
+}
+
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u16 *r, *g, *b;
+	int i;
+	uint32_t dac2_cntl;
+
+	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+	if (radeon_crtc->crtc_id == 0)
+		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+	else
+		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+	WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+	WREG8(RADEON_PALETTE_INDEX, 0);
+	r = crtc->gamma_store;
+	g = r + crtc->gamma_size;
+	b = g + crtc->gamma_size;
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_PALETTE_30_DATA,
+		       ((*r++ & 0xffc0) << 14) |
+		       ((*g++ & 0xffc0) << 4) |
+		       (*b++ >> 6));
+	}
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!crtc->enabled)
+		return;
+
+	if (ASIC_IS_DCE5(rdev))
+		dce5_crtc_load_lut(crtc);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_crtc_load_lut(crtc);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_crtc_load_lut(crtc);
+	else
+		legacy_crtc_load_lut(crtc);
+}
+
+static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t size,
+				 struct drm_modeset_acquire_ctx *ctx)
+{
+	radeon_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	destroy_workqueue(radeon_crtc->flip_queue);
+	kfree(radeon_crtc);
+}
+
+/**
+ * radeon_unpin_work_func - unpin old buffer object
+ *
+ * @__work: kernel work item
+ *
+ * Unpin the old frame buffer object outside of the interrupt handler
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+	struct radeon_flip_work *work =
+		container_of(__work, struct radeon_flip_work, unpin_work);
+	int r;
+
+	/* unpin of the old buffer */
+	r = radeon_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = radeon_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		radeon_bo_unreserve(work->old_rbo);
+	} else {
+		DRM_ERROR("failed to reserve buffer after flip\n");
+	}
+
+	drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+	kfree(work);
+}
+
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	unsigned long flags;
+	u32 update_pending;
+	int vpos, hpos;
+
+	/* can happen during initialization */
+	if (radeon_crtc == NULL)
+		return;
+
+	/* Skip the pageflip completion check below (based on polling) on
+	 * asics which reliably support hw pageflip completion irqs. pflip
+	 * irqs are a reliable and race-free method of handling pageflip
+	 * completion detection. A use_pflipirq module parameter < 2 allows
+	 * overriding this on asics with faulty pflip irqs.
+	 * A module parameter of 0 uses only this polling-based path,
+	 * a parameter of 1 uses the pflip irq only as a backup to this
+	 * path, as in Linux 3.16.
+	 */
+	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+		return;
+
+	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+				 "RADEON_FLIP_SUBMITTED(%d)\n",
+				 radeon_crtc->flip_status,
+				 RADEON_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	update_pending = radeon_page_flip_pending(rdev, crtc_id);
+
+	/* Has the pageflip already completed in crtc, or is it certain
+	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
+	 * distance to start of "fudged earlier" vblank in vpos, distance to
+	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
+	 * the last few scanlines before start of real vblank, where the vblank
+	 * irq can fire, so we have sampled update_pending a bit too early and
+	 * know the flip will complete at leading edge of the upcoming real
+	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
+	 * vblank, not only at its leading edge, so if update_pending is set
+	 * while hpos >= 0 (i.e. inside the real vblank), the flip will
+	 * complete almost immediately.
+	 * Note that this method of completion handling is still not 100% race
+	 * free, as we could execute before the radeon_flip_work_func managed
+	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
+	 * but the flip still gets programmed into hw and completed during
+	 * vblank, leading to a delayed emission of the flip completion event.
+	 * This applies at least to pre-AVIVO hardware, where flips are always
+	 * completing inside vblank, not only at leading edge of vblank.
+	 */
+	if (update_pending &&
+	    (DRM_SCANOUTPOS_VALID &
+	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+					GET_DISTANCE_TO_VBLANKSTART,
+					&vpos, &hpos, NULL, NULL,
+					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
+	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. Based on the current
+		 * scanout position we know that the current frame is
+		 * (nearly) complete and the flip will (likely)
+		 * complete before the start of the next frame.
+		 */
+		update_pending = 0;
+	}
+	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+	if (!update_pending)
+		radeon_crtc_handle_flip(rdev, crtc_id);
+}
+
+/**
+ * radeon_crtc_handle_flip - page flip completed
+ *
+ * @rdev: radeon device pointer
+ * @crtc_id: crtc number this event is for
+ *
+ * Called when we are sure that a page flip for this crtc is completed.
+ */
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	struct radeon_flip_work *work;
+	unsigned long flags;
+
+	/* this can happen at init */
+	if (radeon_crtc == NULL)
+		return;
+
+	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+	work = radeon_crtc->flip_work;
+	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+				 "RADEON_FLIP_SUBMITTED(%d)\n",
+				 radeon_crtc->flip_status,
+				 RADEON_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* Pageflip completed. Clean up. */
+	radeon_crtc->flip_status = RADEON_FLIP_NONE;
+	radeon_crtc->flip_work = NULL;
+
+	/* wakeup userspace */
+	if (work->event)
+		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
+
+	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&radeon_crtc->base);
+	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
+	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
+}
+
+/**
+ * radeon_flip_work_func - page flip framebuffer
+ *
+ * @__work: kernel work item
+ *
+ * Wait for the buffer object to become idle and do the actual page flip
+ */
+static void radeon_flip_work_func(struct work_struct *__work)
+{
+	struct radeon_flip_work *work =
+		container_of(__work, struct radeon_flip_work, flip_work);
+	struct radeon_device *rdev = work->rdev;
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
+
+	struct drm_crtc *crtc = &radeon_crtc->base;
+	unsigned long flags;
+	int r;
+	int vpos, hpos;
+
+	down_read(&rdev->exclusive_lock);
+	if (work->fence) {
+		struct radeon_fence *fence;
+
+		fence = to_radeon_fence(work->fence);
+		if (fence && fence->rdev == rdev) {
+			r = radeon_fence_wait(fence, false);
+			if (r == -EDEADLK) {
+				up_read(&rdev->exclusive_lock);
+				do {
+					r = radeon_gpu_reset(rdev);
+				} while (r == -EAGAIN);
+				down_read(&rdev->exclusive_lock);
+			}
+		} else {
+			r = dma_fence_wait(work->fence, false);
+		}
+
+		if (r)
+			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
+
+		/* We continue with the page flip even if we failed to wait on
+		 * the fence, otherwise the DRM core and userspace will be
+		 * confused about which BO the CRTC is scanning out
+		 */
+
+		dma_fence_put(work->fence);
+		work->fence = NULL;
+	}
+
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
+	 * flip completion handling from vblank irq, as these old asics don't
+	 * have reliable pageflip completion interrupts.
+	 */
+	while (radeon_crtc->enabled &&
+		(radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
+					    &vpos, &hpos, NULL, NULL,
+					    &crtc->hwmode)
+		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+		(!ASIC_IS_AVIVO(rdev) ||
+		((int) (work->target_vblank -
+		dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+		usleep_range(1000, 2000);
+
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* set the proper interrupt */
+	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+	/* do the flip (mmio) */
+	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
+
+	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
+					struct drm_framebuffer *fb,
+					struct drm_pending_vblank_event *event,
+					uint32_t page_flip_flags,
+					uint32_t target,
+					struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct radeon_flip_work *work;
+	struct radeon_bo *new_rbo;
+	uint32_t tiling_flags, pitch_pixels;
+	uint64_t base;
+	unsigned long flags;
+	int r;
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&work->flip_work, radeon_flip_work_func);
+	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	work->event = event;
+	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+
+	/* schedule unpin of the old buffer */
+	obj = crtc->primary->fb->obj[0];
+
+	/* take a reference to the old object */
+	drm_gem_object_get(obj);
+	work->old_rbo = gem_to_radeon_bo(obj);
+
+	obj = fb->obj[0];
+	new_rbo = gem_to_radeon_bo(obj);
+
+	/* pin the new buffer */
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+			 work->old_rbo, new_rbo);
+
+	r = radeon_bo_reserve(new_rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto cleanup;
+	}
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
+				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(new_rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto cleanup;
+	}
+	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(new_rbo);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		/* crtc offset is from display base addr not FB location */
+		base -= radeon_crtc->legacy_display_base_addr;
+		pitch_pixels = fb->pitches[0] / fb->format->cpp[0];
+
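+		/* For the power-of-two depths, byteshift is log2 of the
+		 * bytes per pixel: cpp * 8 >> 4 yields 0 for 8 bpp, 1 for
+		 * 16 bpp and 2 for 32 bpp. */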
+		if (tiling_flags & RADEON_TILING_MACRO) {
+			if (ASIC_IS_R300(rdev)) {
+				base &= ~0x7ff;
+			} else {
+				int byteshift = fb->format->cpp[0] * 8 >> 4;
+				int tile_addr = (((crtc->y >> 3) * pitch_pixels +  crtc->x) >> (8 - byteshift)) << 11;
+				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+			}
+		} else {
+			int offset = crtc->y * pitch_pixels + crtc->x;
+			switch (fb->format->cpp[0] * 8) {
+			case 8:
+			default:
+				offset *= 1;
+				break;
+			case 15:
+			case 16:
+				offset *= 2;
+				break;
+			case 24:
+				offset *= 3;
+				break;
+			case 32:
+				offset *= 4;
+				break;
+			}
+			base += offset;
+		}
+		base &= ~7;
+	}
+	work->base = base;
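+	/*
+	 * Translate the caller's target from the DRM software vblank
+	 * counter domain into the hardware counter domain, so the flip
+	 * worker can compare directly against get_vblank_counter().
+	 */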
+	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
+		dev->driver->get_vblank_counter(dev, work->crtc_id);
+
+	/* We borrow the event spin lock for protecting flip_work */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		r = -EBUSY;
+		goto pflip_cleanup;
+	}
+	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+	radeon_crtc->flip_work = work;
+
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	queue_work(radeon_crtc->flip_queue, &work->flip_work);
+	return 0;
+
+pflip_cleanup:
+	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto cleanup;
+	}
+	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+	}
+	radeon_bo_unreserve(new_rbo);
+
+cleanup:
+	drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+	dma_fence_put(work->fence);
+	kfree(work);
+	return r;
+}
+
+static int
+radeon_crtc_set_config(struct drm_mode_set *set,
+		       struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev;
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	bool active = false;
+	int ret;
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0) {
+		/* pm_runtime_get_sync() raises the usage count even on
+		 * failure, so drop the reference before bailing out */
+		pm_runtime_put_autosuspend(dev->dev);
+		return ret;
+	}
+
+	ret = drm_crtc_helper_set_config(set, ctx);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->enabled)
+			active = true;
+
+	pm_runtime_mark_last_busy(dev->dev);
+
+	rdev = dev->dev_private;
+	/* if we have active crtcs and we don't have a power ref,
+	 * take the current one */
+	if (active && !rdev->have_disp_power_ref) {
+		rdev->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	 * we got before */
+	if (!active && rdev->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		rdev->have_disp_power_ref = false;
+	}
+
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+	.cursor_set2 = radeon_crtc_cursor_set2,
+	.cursor_move = radeon_crtc_cursor_move,
+	.gamma_set = radeon_crtc_gamma_set,
+	.set_config = radeon_crtc_set_config,
+	.destroy = radeon_crtc_destroy,
+	.page_flip_target = radeon_crtc_page_flip_target,
+};
+
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+
+	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (radeon_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+	radeon_crtc->crtc_id = index;
+	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+	rdev->mode_info.crtcs[index] = radeon_crtc;
+
+	if (rdev->family >= CHIP_BONAIRE) {
+		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
+		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
+	} else {
+		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
+		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
+	}
+	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
+
+#if 0
+	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+	radeon_crtc->mode_set.num_connectors = 0;
+#endif
+
+	for (i = 0; i < 256; i++) {
+		radeon_crtc->lut_r[i] = i << 2;
+		radeon_crtc->lut_g[i] = i << 2;
+		radeon_crtc->lut_b[i] = i << 2;
+	}
+
+	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+		radeon_atombios_init_crtc(dev, radeon_crtc);
+	else
+		radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[38] = {
+	"NONE",
+	"INTERNAL_LVDS",
+	"INTERNAL_TMDS1",
+	"INTERNAL_TMDS2",
+	"INTERNAL_DAC1",
+	"INTERNAL_DAC2",
+	"INTERNAL_SDVOA",
+	"INTERNAL_SDVOB",
+	"SI170B",
+	"CH7303",
+	"CH7301",
+	"INTERNAL_DVO1",
+	"EXTERNAL_SDVOA",
+	"EXTERNAL_SDVOB",
+	"TITFP513",
+	"INTERNAL_LVTM1",
+	"VT1623",
+	"HDMI_SI1930",
+	"HDMI_INTERNAL",
+	"INTERNAL_KLDSCP_TMDS1",
+	"INTERNAL_KLDSCP_DVO1",
+	"INTERNAL_KLDSCP_DAC1",
+	"INTERNAL_KLDSCP_DAC2",
+	"SI178",
+	"MVPU_FPGA",
+	"INTERNAL_DDI",
+	"VT1625",
+	"HDMI_SI1932",
+	"DP_AN9801",
+	"DP_DP501",
+	"INTERNAL_UNIPHY",
+	"INTERNAL_KLDSCP_LVTMA",
+	"INTERNAL_UNIPHY1",
+	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
+	"INTERNAL_VCE",
+	"INTERNAL_UNIPHY3",
+};
+
+static const char *hpd_names[6] = {
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t devices;
+	int i = 0;
+
+	DRM_INFO("Radeon Display Connectors\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		DRM_INFO("Connector %d:\n", i);
+		DRM_INFO("  %s\n", connector->name);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
+		if (radeon_connector->ddc_bus) {
+			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				 radeon_connector->ddc_bus->rec.mask_clk_reg,
+				 radeon_connector->ddc_bus->rec.mask_data_reg,
+				 radeon_connector->ddc_bus->rec.a_clk_reg,
+				 radeon_connector->ddc_bus->rec.a_data_reg,
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
+			if (radeon_connector->router.ddc_valid)
+				DRM_INFO("  DDC Router 0x%x/0x%x\n",
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+		}
+		DRM_INFO("  Encoders:\n");
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			devices = radeon_encoder->devices & radeon_connector->devices;
+			if (devices) {
+				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_TV1_SUPPORT)
+					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CV_SUPPORT)
+					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+			}
+		}
+		i++;
+	}
+}
+
+static bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	bool ret = false;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios) {
+			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+			if (ret == false)
+				ret = radeon_get_atom_connector_info_from_object_table(dev);
+		} else {
+			ret = radeon_get_legacy_connector_info_from_bios(dev);
+			if (ret == false)
+				ret = radeon_get_legacy_connector_info_from_table(dev);
+		}
+	} else {
+		if (!ASIC_IS_AVIVO(rdev))
+			ret = radeon_get_legacy_connector_info_from_table(dev);
+	}
+	if (ret) {
+		radeon_setup_encoder_clones(dev);
+		radeon_print_display_setup(dev);
+	}
+
+	return ret;
+}
+
+/* avivo */
+
+/**
+ * avivo_reduce_ratio - fractional number reduction
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @nom_min: minimum value for the numerator
+ * @den_min: minimum value for the denominator
+ *
+ * Find the greatest common divisor and apply it to both numerator and
+ * denominator, but make sure numerator and denominator are at least as
+ * large as their minimum values.
+ */
+static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
+			       unsigned nom_min, unsigned den_min)
+{
+	unsigned tmp;
+
+	/* reduce the numbers to a simpler ratio */
+	tmp = gcd(*nom, *den);
+	*nom /= tmp;
+	*den /= tmp;
+
+	/* make sure the numerator is large enough */
+	if (*nom < nom_min) {
+		tmp = DIV_ROUND_UP(nom_min, *nom);
+		*nom *= tmp;
+		*den *= tmp;
+	}
+
+	/* make sure the denominator is large enough */
+	if (*den < den_min) {
+		tmp = DIV_ROUND_UP(den_min, *den);
+		*nom *= tmp;
+		*den *= tmp;
+	}
+}
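+
+/*
+ * Worked example (illustrative only, not part of the driver; the input
+ * values are hypothetical): reducing 36/24 divides out gcd(36, 24) = 12,
+ * giving 3/2.  With nom_min = 8 the ratio is then scaled back up by
+ * DIV_ROUND_UP(8, 3) = 3, yielding 9/6, the same ratio but with the
+ * numerator above its minimum.
+ */
+#if 0
+static void avivo_reduce_ratio_example(void)
+{
+	unsigned nom = 36, den = 24;	/* hypothetical input ratio */
+
+	avivo_reduce_ratio(&nom, &den, 8, 1);
+	/* now nom == 9 and den == 6 */
+}
+#endif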
+
+/**
+ * avivo_get_fb_ref_div - feedback and ref divider calculation
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @post_div: post divider
+ * @fb_div_max: feedback divider maximum
+ * @ref_div_max: reference divider maximum
+ * @fb_div: resulting feedback divider
+ * @ref_div: resulting reference divider
+ *
+ * Calculate feedback and reference divider for a given post divider. Makes
+ * sure we stay within the limits.
+ */
+static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
+				 unsigned fb_div_max, unsigned ref_div_max,
+				 unsigned *fb_div, unsigned *ref_div)
+{
+	/* limit reference * post divider to a maximum */
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+
+	/* get matching reference and feedback divider */
+	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+
+	/* limit fb divider to its maximum */
+	if (*fb_div > fb_div_max) {
+		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*fb_div = fb_div_max;
+	}
+}
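+
+/*
+ * Worked example (illustrative, hypothetical values): for nom/den = 9/6
+ * and post_div = 2, the reference divider is first capped at
+ * min(100 / 2, ref_div_max), then DIV_ROUND_CLOSEST(6, 2) = 3 is chosen,
+ * and fb_div = DIV_ROUND_CLOSEST(9 * 3 * 2, 6) = 9.  The resulting
+ * fb_div / (ref_div * post_div) = 9/6 reproduces the requested ratio.
+ */
+#if 0
+static void avivo_get_fb_ref_div_example(void)
+{
+	unsigned fb_div, ref_div;
+
+	avivo_get_fb_ref_div(9, 6, 2, 1023, 1023, &fb_div, &ref_div);
+	/* now fb_div == 9 and ref_div == 3 */
+}
+#endif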
+
+/**
+ * radeon_compute_pll_avivo - compute PLL parameters
+ *
+ * @pll: information about the PLL
+ * @freq: requested frequency
+ * @dot_clock_p: resulting pixel clock
+ * @fb_div_p: resulting feedback divider
+ * @frac_fb_div_p: fractional part of the feedback divider
+ * @ref_div_p: resulting reference divider
+ * @post_div_p: resulting post divider
+ *
+ * Try to calculate the PLL parameters to generate the given frequency:
+ * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
+ */
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      u32 freq,
+			      u32 *dot_clock_p,
+			      u32 *fb_div_p,
+			      u32 *frac_fb_div_p,
+			      u32 *ref_div_p,
+			      u32 *post_div_p)
+{
+	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
+		freq : freq / 10;
+
+	unsigned fb_div_min, fb_div_max, fb_div;
+	unsigned post_div_min, post_div_max, post_div;
+	unsigned ref_div_min, ref_div_max, ref_div;
+	unsigned post_div_best, diff_best;
+	unsigned nom, den;
+
+	/* determine allowed feedback divider range */
+	fb_div_min = pll->min_feedback_div;
+	fb_div_max = pll->max_feedback_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		fb_div_min *= 10;
+		fb_div_max *= 10;
+	}
+
+	/* determine allowed ref divider range */
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div_min = pll->reference_div;
+	else
+		ref_div_min = pll->min_ref_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
+	    pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div_max = pll->reference_div;
+	else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+		/* fix for problems on RS880 */
+		ref_div_max = min(pll->max_ref_div, 7u);
+	else
+		ref_div_max = pll->max_ref_div;
+
+	/* determine allowed post divider range */
+	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+		post_div_min = pll->post_div;
+		post_div_max = pll->post_div;
+	} else {
+		unsigned vco_min, vco_max;
+
+		if (pll->flags & RADEON_PLL_IS_LCD) {
+			vco_min = pll->lcd_pll_out_min;
+			vco_max = pll->lcd_pll_out_max;
+		} else {
+			vco_min = pll->pll_out_min;
+			vco_max = pll->pll_out_max;
+		}
+
+		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+			vco_min *= 10;
+			vco_max *= 10;
+		}
+
+		post_div_min = vco_min / target_clock;
+		if ((target_clock * post_div_min) < vco_min)
+			++post_div_min;
+		if (post_div_min < pll->min_post_div)
+			post_div_min = pll->min_post_div;
+
+		post_div_max = vco_max / target_clock;
+		if ((target_clock * post_div_max) > vco_max)
+			--post_div_max;
+		if (post_div_max > pll->max_post_div)
+			post_div_max = pll->max_post_div;
+	}
+
+	/* represent the searched ratio as fractional number */
+	nom = target_clock;
+	den = pll->reference_freq;
+
+	/* reduce the numbers to a simpler ratio */
+	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
+
+	/* now search for a post divider */
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+		post_div_best = post_div_min;
+	else
+		post_div_best = post_div_max;
+	diff_best = ~0;
+
+	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
+		unsigned diff;
+		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
+				     ref_div_max, &fb_div, &ref_div);
+		diff = abs(target_clock - (pll->reference_freq * fb_div) /
+			(ref_div * post_div));
+
+		if (diff < diff_best || (diff == diff_best &&
+		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
+
+			post_div_best = post_div;
+			diff_best = diff;
+		}
+	}
+	post_div = post_div_best;
+
+	/* get the feedback and reference divider for the optimal value */
+	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+			     &fb_div, &ref_div);
+
+	/* reduce the numbers to a simpler ratio once more */
+	/* this also makes sure that the reference divider is large enough */
+	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
+
+	/* avoid high jitter with small fractional dividers */
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+		if (fb_div < fb_div_min) {
+			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+			fb_div *= tmp;
+			ref_div *= tmp;
+		}
+	}
+
+	/* and finally save the result */
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		*fb_div_p = fb_div / 10;
+		*frac_fb_div_p = fb_div % 10;
+	} else {
+		*fb_div_p = fb_div;
+		*frac_fb_div_p = 0;
+	}
+
+	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
+			(pll->reference_freq * *frac_fb_div_p)) /
+		       (ref_div * post_div * 10);
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+
+	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
+		      ref_div, post_div);
+}
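+
+/*
+ * Numeric check of the result above (hypothetical divider values): with
+ * reference_freq = 2700, an integer feedback divider of 110 (fractional
+ * part 0), ref_div = 2 and post_div = 10, the dot clock works out to
+ * (2700 * 110 * 10 + 2700 * 0) / (2 * 10 * 10) = 14850, i.e. the
+ * feedback divider multiplies the reference frequency while the
+ * reference and post dividers divide it back down.
+ */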
+
+/* pre-avivo */
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+	uint64_t mod;
+
+	n += d / 2;
+
+	mod = do_div(n, d);
+	return n;
+}
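+
+/*
+ * radeon_div() rounds to nearest instead of truncating: the d/2 bias
+ * means radeon_div(7, 2) = (7 + 1) / 2 = 4 and
+ * radeon_div(10, 4) = (10 + 2) / 4 = 3, where plain integer division
+ * would give 3 and 2 respectively.
+ */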
+
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+			       uint64_t freq,
+			       uint32_t *dot_clock_p,
+			       uint32_t *fb_div_p,
+			       uint32_t *frac_fb_div_p,
+			       uint32_t *ref_div_p,
+			       uint32_t *post_div_p)
+{
+	uint32_t min_ref_div = pll->min_ref_div;
+	uint32_t max_ref_div = pll->max_ref_div;
+	uint32_t min_post_div = pll->min_post_div;
+	uint32_t max_post_div = pll->max_post_div;
+	uint32_t min_fractional_feed_div = 0;
+	uint32_t max_fractional_feed_div = 0;
+	uint32_t best_vco = pll->best_vco;
+	uint32_t best_post_div = 1;
+	uint32_t best_ref_div = 1;
+	uint32_t best_feedback_div = 1;
+	uint32_t best_frac_feedback_div = 0;
+	uint32_t best_freq = -1;
+	uint32_t best_error = 0xffffffff;
+	uint32_t best_vco_diff = 1;
+	uint32_t post_div;
+	u32 pll_out_min, pll_out_max;
+
+	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+	freq = freq * 1000;
+
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
+
+	if (pll_out_min > 64800)
+		pll_out_min = 64800;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		min_ref_div = max_ref_div = pll->reference_div;
+	else {
+		while (min_ref_div < max_ref_div-1) {
+			uint32_t mid = (min_ref_div + max_ref_div) / 2;
+			uint32_t pll_in = pll->reference_freq / mid;
+			if (pll_in < pll->pll_in_min)
+				max_ref_div = mid;
+			else if (pll_in > pll->pll_in_max)
+				min_ref_div = mid;
+			else
+				break;
+		}
+	}
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		min_post_div = max_post_div = pll->post_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		min_fractional_feed_div = pll->min_frac_feedback_div;
+		max_fractional_feed_div = pll->max_frac_feedback_div;
+	}
+
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+		uint32_t ref_div;
+
+		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+			continue;
+
+		/* legacy radeons only have a few post_divs */
+		if (pll->flags & RADEON_PLL_LEGACY) {
+			if ((post_div == 5) ||
+			    (post_div == 7) ||
+			    (post_div == 9) ||
+			    (post_div == 10) ||
+			    (post_div == 11) ||
+			    (post_div == 13) ||
+			    (post_div == 14) ||
+			    (post_div == 15))
+				continue;
+		}
+
+		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+			uint32_t feedback_div, current_freq = 0, error, vco_diff;
+			uint32_t pll_in = pll->reference_freq / ref_div;
+			uint32_t min_feed_div = pll->min_feedback_div;
+			uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+				continue;
+
+			while (min_feed_div < max_feed_div) {
+				uint32_t vco;
+				uint32_t min_frac_feed_div = min_fractional_feed_div;
+				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+				uint32_t frac_feedback_div;
+				uint64_t tmp;
+
+				feedback_div = (min_feed_div + max_feed_div) / 2;
+
+				tmp = (uint64_t)pll->reference_freq * feedback_div;
+				vco = radeon_div(tmp, ref_div);
+
+				if (vco < pll_out_min) {
+					min_feed_div = feedback_div + 1;
+					continue;
+				} else if (vco > pll_out_max) {
+					max_feed_div = feedback_div;
+					continue;
+				}
+
+				while (min_frac_feed_div < max_frac_feed_div) {
+					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+					current_freq = radeon_div(tmp, ref_div * post_div);
+
+					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+						if (freq < current_freq)
+							error = 0xffffffff;
+						else
+							error = freq - current_freq;
+					} else
+						error = abs(current_freq - freq);
+					vco_diff = abs(vco - best_vco);
+
+					if ((best_vco == 0 && error < best_error) ||
+					    (best_vco != 0 &&
+					     ((best_error > 100 && error < best_error - 100) ||
+					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+						best_post_div = post_div;
+						best_ref_div = ref_div;
+						best_feedback_div = feedback_div;
+						best_frac_feedback_div = frac_feedback_div;
+						best_freq = current_freq;
+						best_error = error;
+						best_vco_diff = vco_diff;
+					} else if (current_freq == freq) {
+						if (best_freq == -1) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						}
+					}
+					if (current_freq < freq)
+						min_frac_feed_div = frac_feedback_div + 1;
+					else
+						max_frac_feed_div = frac_feedback_div;
+				}
+				if (current_freq < freq)
+					min_feed_div = feedback_div + 1;
+				else
+					max_feed_div = feedback_div;
+			}
+		}
+	}
+
+	*dot_clock_p = best_freq / 10000;
+	*fb_div_p = best_feedback_div;
+	*frac_fb_div_p = best_frac_feedback_div;
+	*ref_div_p = best_ref_div;
+	*post_div_p = best_post_div;
+	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      (long long)freq,
+		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+		      best_ref_div, best_post_div);
+
+}
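+
+/*
+ * The feedback divider loop above is a binary search: the VCO frequency
+ * grows monotonically with feedback_div, so the [min_feed_div,
+ * max_feed_div) window is halved each iteration.  A stripped-down sketch
+ * of that search (illustrative only, without the fractional divider and
+ * the VCO range checks):
+ */
+#if 0
+static uint32_t bisect_feedback_div(uint32_t ref_freq, uint32_t ref_div,
+				    uint32_t post_div, uint32_t target,
+				    uint32_t lo, uint32_t hi)
+{
+	while (lo < hi) {
+		uint32_t mid = (lo + hi) / 2;
+		uint32_t freq = (ref_freq * mid) / (ref_div * post_div);
+
+		if (freq < target)
+			lo = mid + 1;	/* need a larger feedback divider */
+		else
+			hi = mid;	/* mid may still be the best match */
+	}
+	return lo;	/* smallest divider reaching at least target */
+}
+#endif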
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+};
+
+int
+radeon_framebuffer_init(struct drm_device *dev,
+			struct drm_framebuffer *fb,
+			const struct drm_mode_fb_cmd2 *mode_cmd,
+			struct drm_gem_object *obj)
+{
+	int ret;
+
+	fb->obj[0] = obj;
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+	ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
+	if (ret) {
+		fb->obj[0] = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+radeon_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *file_priv,
+			       const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+	if (obj == NULL) {
+		dev_err(&dev->pdev->dev, "No GEM object associated with handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handles[0]);
+		return ERR_PTR(-ENOENT);
+	}
+
+	/* Handle is an imported dma-buf, so it cannot be migrated to VRAM for scanout */
+	if (obj->import_attach) {
+		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+		drm_gem_object_put_unlocked(obj);
+		return ERR_PTR(-EINVAL);
+	}
+
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (fb == NULL) {
+		drm_gem_object_put_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
+	if (ret) {
+		kfree(fb);
+		drm_gem_object_put_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	return fb;
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+	.fb_create = radeon_user_framebuffer_create,
+	.output_poll_changed = drm_fb_helper_output_poll_changed,
+};
+
+static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+{	{ 0, "driver" },
+	{ 1, "bios" },
+};
+
+static const struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+{	{ TV_STD_NTSC, "ntsc" },
+	{ TV_STD_PAL, "pal" },
+	{ TV_STD_PAL_M, "pal-m" },
+	{ TV_STD_PAL_60, "pal-60" },
+	{ TV_STD_NTSC_J, "ntsc-j" },
+	{ TV_STD_SCART_PAL, "scart-pal" },
+	{ TV_STD_PAL_CN, "pal-cn" },
+	{ TV_STD_SECAM, "secam" },
+};
+
+static const struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{	{ UNDERSCAN_OFF, "off" },
+	{ UNDERSCAN_ON, "on" },
+	{ UNDERSCAN_AUTO, "auto" },
+};
+
+static const struct drm_prop_enum_list radeon_audio_enum_list[] =
+{	{ RADEON_AUDIO_DISABLE, "off" },
+	{ RADEON_AUDIO_ENABLE, "on" },
+	{ RADEON_AUDIO_AUTO, "auto" },
+};
+
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static const struct drm_prop_enum_list radeon_dither_enum_list[] =
+{	{ RADEON_FMT_DITHER_DISABLE, "off" },
+	{ RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
+static const struct drm_prop_enum_list radeon_output_csc_enum_list[] =
+{	{ RADEON_OUTPUT_CSC_BYPASS, "bypass" },
+	{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
+	{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
+	{ RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
+};
+
+static int radeon_modeset_create_props(struct radeon_device *rdev)
+{
+	int sz;
+
+	if (rdev->is_atom_bios) {
+		rdev->mode_info.coherent_mode_property =
+			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
+		if (!rdev->mode_info.coherent_mode_property)
+			return -ENOMEM;
+	}
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
+		rdev->mode_info.tmds_pll_property =
+			drm_property_create_enum(rdev->ddev, 0,
+					    "tmds_pll",
+					    radeon_tmds_pll_enum_list, sz);
+	}
+
+	rdev->mode_info.load_detect_property =
+		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+	if (!rdev->mode_info.load_detect_property)
+		return -ENOMEM;
+
+	drm_mode_create_scaling_mode_property(rdev->ddev);
+
+	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
+	rdev->mode_info.tv_std_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "tv standard",
+				    radeon_tv_std_enum_list, sz);
+
+	sz = ARRAY_SIZE(radeon_underscan_enum_list);
+	rdev->mode_info.underscan_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "underscan",
+				    radeon_underscan_enum_list, sz);
+
+	rdev->mode_info.underscan_hborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan hborder", 0, 128);
+	if (!rdev->mode_info.underscan_hborder_property)
+		return -ENOMEM;
+
+	rdev->mode_info.underscan_vborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan vborder", 0, 128);
+	if (!rdev->mode_info.underscan_vborder_property)
+		return -ENOMEM;
+
+	sz = ARRAY_SIZE(radeon_audio_enum_list);
+	rdev->mode_info.audio_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "audio",
+					 radeon_audio_enum_list, sz);
+
+	sz = ARRAY_SIZE(radeon_dither_enum_list);
+	rdev->mode_info.dither_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "dither",
+					 radeon_dither_enum_list, sz);
+
+	sz = ARRAY_SIZE(radeon_output_csc_enum_list);
+	rdev->mode_info.output_csc_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "output_csc",
+					 radeon_output_csc_enum_list, sz);
+
+	return 0;
+}
+
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+	/* adjustment options for the display watermarks */
+	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+		/* set display priority to high for r3xx, rv515 chips
+		 * this avoids flickering due to underflow to the
+		 * display controllers during heavy acceleration.
+		 * Don't force high on rs4xx igp chips as it seems to
+		 * affect the sound card.  See kernel bug 15982.
+		 */
+		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+		    !(rdev->flags & RADEON_IS_IGP))
+			rdev->disp_priority = 2;
+		else
+			rdev->disp_priority = 0;
+	} else {
+		rdev->disp_priority = radeon_disp_priority;
+	}
+}
+
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+		rdev->mode_info.afmt[i] = NULL;
+
+	if (ASIC_IS_NODCE(rdev)) {
+		/* nothing to do */
+	} else if (ASIC_IS_DCE4(rdev)) {
+		static uint32_t eg_offsets[] = {
+			EVERGREEN_CRTC0_REGISTER_OFFSET,
+			EVERGREEN_CRTC1_REGISTER_OFFSET,
+			EVERGREEN_CRTC2_REGISTER_OFFSET,
+			EVERGREEN_CRTC3_REGISTER_OFFSET,
+			EVERGREEN_CRTC4_REGISTER_OFFSET,
+			EVERGREEN_CRTC5_REGISTER_OFFSET,
+			0x13830 - 0x7030,
+		};
+		int num_afmt;
+
+		/* DCE8 has 7 audio blocks tied to DIG encoders */
+		/* DCE6 has 6 audio blocks tied to DIG encoders */
+		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
+		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
+		if (ASIC_IS_DCE8(rdev))
+			num_afmt = 7;
+		else if (ASIC_IS_DCE6(rdev))
+			num_afmt = 6;
+		else if (ASIC_IS_DCE5(rdev))
+			num_afmt = 6;
+		else if (ASIC_IS_DCE41(rdev))
+			num_afmt = 2;
+		else /* DCE4 */
+			num_afmt = 6;
+
+		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
+		for (i = 0; i < num_afmt; i++) {
+			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[i]) {
+				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
+				rdev->mode_info.afmt[i]->id = i;
+			}
+		}
+	} else if (ASIC_IS_DCE3(rdev)) {
+		/* DCE3.x has 2 audio blocks tied to DIG encoders */
+		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[1]) {
+			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+			rdev->mode_info.afmt[1]->id = 1;
+		}
+	} else if (ASIC_IS_DCE2(rdev)) {
+		/* DCE2 has at least 1 routable audio block */
+		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		/* r6xx has 2 routable audio blocks */
+		if (rdev->family >= CHIP_R600) {
+			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[1]) {
+				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+				rdev->mode_info.afmt[1]->id = 1;
+			}
+		}
+	}
+}
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+		kfree(rdev->mode_info.afmt[i]);
+		rdev->mode_info.afmt[i] = NULL;
+	}
+}
+
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(rdev->ddev);
+	rdev->mode_info.mode_config_initialized = true;
+
+	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+
+	if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
+		rdev->ddev->mode_config.async_page_flip = true;
+
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->ddev->mode_config.max_width = 16384;
+		rdev->ddev->mode_config.max_height = 16384;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		rdev->ddev->mode_config.max_width = 8192;
+		rdev->ddev->mode_config.max_height = 8192;
+	} else {
+		rdev->ddev->mode_config.max_width = 4096;
+		rdev->ddev->mode_config.max_height = 4096;
+	}
+
+	rdev->ddev->mode_config.preferred_depth = 24;
+	rdev->ddev->mode_config.prefer_shadow = 1;
+
+	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+	ret = radeon_modeset_create_props(rdev);
+	if (ret) {
+		return ret;
+	}
+
+	/* init i2c buses */
+	radeon_i2c_init(rdev);
+
+	/* check combios for a valid hardcoded EDID - Sun servers */
+	if (!rdev->is_atom_bios) {
+		/* check for hardcoded EDID in BIOS */
+		radeon_combios_check_hardcoded_edid(rdev);
+	}
+
+	/* allocate crtcs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		radeon_crtc_init(rdev->ddev, i);
+	}
+
+	/* okay we should have all the bios connectors */
+	ret = radeon_setup_enc_conn(rdev->ddev);
+	if (!ret) {
+		return ret;
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+	}
+
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
+
+	/* setup afmt */
+	radeon_afmt_init(rdev);
+
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);
+
+	/* do pm late init */
+	ret = radeon_pm_late_init(rdev);
+
+	return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+	if (rdev->mode_info.mode_config_initialized) {
+		drm_kms_helper_poll_fini(rdev->ddev);
+		radeon_hpd_fini(rdev);
+		drm_crtc_force_disable_all(rdev->ddev);
+		radeon_fbdev_fini(rdev);
+		radeon_afmt_fini(rdev);
+		drm_mode_config_cleanup(rdev->ddev);
+		rdev->mode_info.mode_config_initialized = false;
+	}
+
+	kfree(rdev->mode_info.bios_hardcoded_edid);
+
+	/* free i2c buses */
+	radeon_i2c_fini(rdev);
+}
+
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
+{
+	/* try and guess if this is a tv or a monitor */
+	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+	    (mode->vdisplay == 576) || /* 576p */
+	    (mode->vdisplay == 720) || /* 720p */
+	    (mode->vdisplay == 1080)) /* 1080p */
+		return true;
+	else
+		return false;
+}
+
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_encoder *radeon_encoder;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	bool first = true;
+	u32 src_v = 1, dst_v = 1;
+	u32 src_h = 1, dst_h = 1;
+
+	radeon_crtc->h_border = 0;
+	radeon_crtc->v_border = 0;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		radeon_encoder = to_radeon_encoder(encoder);
+		connector = radeon_get_connector_for_encoder(encoder);
+		radeon_connector = to_radeon_connector(connector);
+
+		if (first) {
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
+			memcpy(&radeon_crtc->native_mode,
+			       &radeon_encoder->native_mode,
+				sizeof(struct drm_display_mode));
+			src_v = crtc->mode.vdisplay;
+			dst_v = radeon_crtc->native_mode.vdisplay;
+			src_h = crtc->mode.hdisplay;
+			dst_h = radeon_crtc->native_mode.hdisplay;
+
+			/* fix up for overscan on hdmi */
+			if (ASIC_IS_AVIVO(rdev) &&
+			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+			      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
+			      is_hdtv_mode(mode)))) {
+				if (radeon_encoder->underscan_hborder != 0)
+					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+				else
+					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+				if (radeon_encoder->underscan_vborder != 0)
+					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+				else
+					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+				radeon_crtc->rmx_type = RMX_FULL;
+				src_v = crtc->mode.vdisplay;
+				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+				src_h = crtc->mode.hdisplay;
+				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+			}
+			first = false;
+		} else {
+			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
+				/* WARNING: Right now this can't happen but
+				 * in the future we need to check that the
+				 * scaling settings are consistent across
+				 * different encoders (i.e. all encoders
+				 * sharing a crtc can work with the same
+				 * scaling).
+				 */
+				DRM_ERROR("Scaling not consistent across encoders.\n");
+				return false;
+			}
+		}
+	}
+	if (radeon_crtc->rmx_type != RMX_OFF) {
+		fixed20_12 a, b;
+		a.full = dfixed_const(src_v);
+		b.full = dfixed_const(dst_v);
+		radeon_crtc->vsc.full = dfixed_div(a, b);
+		a.full = dfixed_const(src_h);
+		b.full = dfixed_const(dst_h);
+		radeon_crtc->hsc.full = dfixed_div(a, b);
+	} else {
+		radeon_crtc->vsc.full = dfixed_const(1);
+		radeon_crtc->hsc.full = dfixed_const(1);
+	}
+	return true;
+}
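+
+/*
+ * Scale factor example: vsc/hsc are 20.12 fixed-point ratios of source
+ * size over native size.  Scaling a hypothetical 1080-line mode onto a
+ * 720-line panel gives vsc = 1080/720 = 1.5, stored as
+ * 1.5 * 4096 = 6144 (0x1800); values above 1.0 therefore indicate
+ * downscaling.
+ */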
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when the query happened.
+ *
+ * \param dev Device to query.
+ * \param pipe Crtc (pipe index) to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ *               scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ *               scanout position query. Can be NULL to skip timestamp.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			       unsigned int flags, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime,
+			       const struct drm_display_mode *mode)
+{
+	u32 stat_crtc = 0, vbl = 0, position = 0;
+	int vbl_start, vbl_end, vtotal, ret = 0;
+	bool in_vbl = true;
+
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (ASIC_IS_DCE4(rdev)) {
+		if (pipe == 0) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC0_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC0_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 1) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC1_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC1_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 2) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC2_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC2_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 3) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC3_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC3_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 4) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC4_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC4_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 5) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC5_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC5_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (pipe == 0) {
+			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 1) {
+			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else {
+		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+		if (pipe == 0) {
+			/* Assume vbl_end == 0, get vbl_start from
+			 * upper 16 bits.
+			 */
+			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (pipe == 1) {
+			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	}
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Decode into vertical and horizontal scanout position. */
+	*vpos = position & 0x1fff;
+	*hpos = (position >> 16) & 0x1fff;
+
+	/* Valid vblank area boundaries from gpu retrieved? */
+	if (vbl > 0) {
+		/* Yes: Decode. */
+		ret |= DRM_SCANOUTPOS_ACCURATE;
+		vbl_start = vbl & 0x1fff;
+		vbl_end = (vbl >> 16) & 0x1fff;
+	} else {
+		/* No: Fake something reasonable which gives at least ok results. */
+		vbl_start = mode->crtc_vdisplay;
+		vbl_end = 0;
+	}
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from real vbl_start in *hpos */
+		*hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
+	/* Test scanout position against vblank region. */
+	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+		in_vbl = false;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
+	/* Check if inside vblank area and apply corrective offsets:
+	 * vpos will then be >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+
+	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
+	if (in_vbl && (*vpos >= vbl_start)) {
+		vtotal = mode->crtc_vtotal;
+		*vpos = *vpos - vtotal;
+	}
+
+	/* Correct for shifted end of vbl at vbl_end. */
+	*vpos = *vpos - vbl_end;
+
+	return ret;
+}
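+
+/*
+ * Decoding example (hypothetical register values): position = 0x00500437
+ * unpacks to vpos = 0x437 = 1079 and hpos = 0x50 = 80, while
+ * vbl = 0x00040438 gives vbl_start = 1080 and vbl_end = 4.  A vpos at or
+ * beyond vbl_start is folded to a negative value above, so a caller
+ * inside the vblank directly sees the number of lines left until
+ * scanout restarts.
+ */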
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
new file mode 100644
index 0000000..12eac4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "nid.h"
+
+#define AUX_RX_ERROR_FLAGS (AUX_SW_RX_OVERFLOW |	     \
+			    AUX_SW_RX_HPD_DISCON |	     \
+			    AUX_SW_RX_PARTIAL_BYTE |	     \
+			    AUX_SW_NON_AUX_MODE |	     \
+			    AUX_SW_RX_SYNC_INVALID_L |	     \
+			    AUX_SW_RX_SYNC_INVALID_H |	     \
+			    AUX_SW_RX_INVALID_START |	     \
+			    AUX_SW_RX_RECV_NO_DET |	     \
+			    AUX_SW_RX_RECV_INVALID_H |	     \
+			    AUX_SW_RX_RECV_INVALID_V)
+
+#define AUX_SW_REPLY_GET_BYTE_COUNT(x) (((x) >> 24) & 0x1f)
+
+#define BARE_ADDRESS_SIZE 3
+
+static const u32 aux_offset[] =
+{
+	0x6200 - 0x6200,
+	0x6250 - 0x6200,
+	0x62a0 - 0x6200,
+	0x6300 - 0x6200,
+	0x6350 - 0x6200,
+	0x63a0 - 0x6200,
+};
+
+ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct radeon_i2c_chan *chan =
+		container_of(aux, struct radeon_i2c_chan, aux);
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret = 0, i;
+	uint32_t tmp, ack = 0;
+	int instance = chan->rec.i2c_id & 0xf;
+	u8 byte;
+	u8 *buf = msg->buffer;
+	int retry_count = 0;
+	int bytes;
+	int msize;
+	bool is_write = false;
+
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+		is_write = true;
+		break;
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* work out two sizes required */
+	msize = 0;
+	bytes = BARE_ADDRESS_SIZE;
+	if (msg->size) {
+		msize = msg->size - 1;
+		bytes++;
+		if (is_write)
+			bytes += msg->size;
+	}
+
+	mutex_lock(&chan->mutex);
+
+	/* switch the pad to aux mode */
+	tmp = RREG32(chan->rec.mask_clk_reg);
+	tmp |= (1 << 16);
+	WREG32(chan->rec.mask_clk_reg, tmp);
+
+	/* setup AUX control register with correct HPD pin */
+	tmp = RREG32(AUX_CONTROL + aux_offset[instance]);
+
+	tmp &= AUX_HPD_SEL(0x7);
+	tmp |= AUX_HPD_SEL(chan->rec.hpd);
+	tmp |= AUX_EN | AUX_LS_READ_EN;
+
+	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+
+	/* atombios appears to write this twice; let's do the same */
+	WREG32(AUX_SW_CONTROL + aux_offset[instance],
+	       AUX_SW_WR_BYTES(bytes));
+	WREG32(AUX_SW_CONTROL + aux_offset[instance],
+	       AUX_SW_WR_BYTES(bytes));
+
+	/* write the data header into the registers */
+	/* request, address, msg size */
+	byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+	WREG32(AUX_SW_DATA + aux_offset[instance],
+	       AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
+
+	byte = (msg->address >> 8) & 0xff;
+	WREG32(AUX_SW_DATA + aux_offset[instance],
+	       AUX_SW_DATA_MASK(byte));
+
+	byte = msg->address & 0xff;
+	WREG32(AUX_SW_DATA + aux_offset[instance],
+	       AUX_SW_DATA_MASK(byte));
+
+	byte = msize;
+	WREG32(AUX_SW_DATA + aux_offset[instance],
+	       AUX_SW_DATA_MASK(byte));
+
+	/* if we are writing - write the msg buffer */
+	if (is_write) {
+		for (i = 0; i < msg->size; i++) {
+			WREG32(AUX_SW_DATA + aux_offset[instance],
+			       AUX_SW_DATA_MASK(buf[i]));
+		}
+	}
+
+	/* clear the ACK */
+	WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+	/* write the size and GO bits */
+	WREG32(AUX_SW_CONTROL + aux_offset[instance],
+	       AUX_SW_WR_BYTES(bytes) | AUX_SW_GO);
+
+	/* poll the status registers - TODO irq support */
+	do {
+		tmp = RREG32(AUX_SW_STATUS + aux_offset[instance]);
+		if (tmp & AUX_SW_DONE) {
+			break;
+		}
+		usleep_range(100, 200);
+	} while (retry_count++ < 1000);
+
+	if (retry_count >= 1000) {
+		DRM_ERROR("auxch hw never signalled completion, error %08x\n", tmp);
+		ret = -EIO;
+		goto done;
+	}
+
+	if (tmp & AUX_SW_RX_TIMEOUT) {
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+	if (tmp & AUX_RX_ERROR_FLAGS) {
+		DRM_DEBUG_KMS_RATELIMITED("dp_aux_ch flags not zero: %08x\n",
+					  tmp);
+		ret = -EIO;
+		goto done;
+	}
+
+	bytes = AUX_SW_REPLY_GET_BYTE_COUNT(tmp);
+	if (bytes) {
+		WREG32(AUX_SW_DATA + aux_offset[instance],
+		       AUX_SW_DATA_RW | AUX_SW_AUTOINCREMENT_DISABLE);
+
+		tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+		ack = (tmp >> 8) & 0xff;
+
+		for (i = 0; i < bytes - 1; i++) {
+			tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+			if (buf)
+				buf[i] = (tmp >> 8) & 0xff;
+		}
+		if (buf)
+			ret = bytes - 1;
+	}
+
+	WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+	if (is_write)
+		ret = msg->size;
+done:
+	mutex_unlock(&chan->mutex);
+
+	if (ret >= 0)
+		msg->reply = ack >> 4;
+	return ret;
+}
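+
+/*
+ * Header packing example (illustrative): a native AUX read of 2 bytes at
+ * DPCD address 0x202 produces the four header bytes written above:
+ * 0x90 (DP_AUX_NATIVE_READ << 4 | address bits 19:16), 0x02 and 0x02
+ * (the remaining address bytes) and 0x01 (message size minus one), so
+ * "bytes" is BARE_ADDRESS_SIZE + 1 with no payload for a read.
+ */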
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
new file mode 100644
index 0000000..f920be2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "radeon.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
+
+static int radeon_atom_set_enc_offset(int id)
+{
+	static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET,
+				       EVERGREEN_CRTC1_REGISTER_OFFSET,
+				       EVERGREEN_CRTC2_REGISTER_OFFSET,
+				       EVERGREEN_CRTC3_REGISTER_OFFSET,
+				       EVERGREEN_CRTC4_REGISTER_OFFSET,
+				       EVERGREEN_CRTC5_REGISTER_OFFSET,
+				       0x13830 - 0x7030 };
+
+	return offsets[id];
+}
+
+static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary,
+				     struct radeon_encoder_mst *mst_enc,
+				     enum radeon_hpd_id hpd, bool enable)
+{
+	struct drm_device *dev = primary->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t reg;
+	int retries = 0;
+	uint32_t temp;
+
+	reg = RREG32(NI_DIG_BE_CNTL + primary->offset);
+
+	/* set MST mode */
+	reg &= ~NI_DIG_FE_DIG_MODE(7);
+	reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST);
+
+	if (enable)
+		reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+	else
+		reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+
+	reg |= NI_DIG_HPD_SELECT(hpd);
+	DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg);
+	WREG32(NI_DIG_BE_CNTL + primary->offset, reg);
+
+	if (enable) {
+		uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+
+		do {
+			temp = RREG32(NI_DIG_FE_CNTL + offset);
+		} while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000);
+		if (retries == 10000)
+			DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe);
+	}
+	return 0;
+}
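+
+/*
+ * Routing example (illustrative): the DIG backend selects its frontend
+ * through a one-hot mask, so enabling frontend 2 ORs
+ * NI_DIG_FE_SOURCE_SELECT(1 << 2), i.e. bit 2 of the select field, into
+ * BE_CNTL, while NI_DIG_HPD_SELECT(hpd) ties the backend to the HPD pad
+ * of the MST root port.
+ */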
+
+static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
+					   int stream_number,
+					   int fe,
+					   int slots)
+{
+	struct drm_device *dev = primary->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u32 temp, val;
+	int retries  = 0;
+	int satreg, satidx;
+
+	satreg = stream_number >> 1;
+	satidx = stream_number & 1;
+
+	temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset);
+
+	val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe);
+
+	val <<= (16 * satidx);
+
+	temp &= ~(0xffff << (16 * satidx));
+
+	temp |= val;
+
+	DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+	WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+
+	WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
+
+	do {
+		unsigned value1, value2;
+		udelay(10);
+		temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
+
+		value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+		value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+		if (!value1 && !value2)
+			break;
+	} while (retries++ < 50);
+
+	if (retries > 50)
+		DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset);
+
+	/* MTP 16 ? */
+	return 0;
+}
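+
+/*
+ * SAT layout example: each 32-bit SAT register holds two 16-bit stream
+ * entries, so stream 3 maps to satreg = 3 >> 1 = 1 and satidx = 3 & 1 = 1,
+ * i.e. the upper half-word of the second register; the read-modify-write
+ * above clears only that 16-bit field before inserting the new
+ * slot-count/source pair.
+ */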
+
+static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn,
+					       struct radeon_encoder *primary)
+{
+	struct drm_device *dev = mst_conn->base.dev;
+	struct stream_attribs new_attribs[6];
+	int i;
+	int idx = 0;
+	struct radeon_connector *radeon_connector;
+	struct drm_connector *connector;
+
+	memset(new_attribs, 0, sizeof(new_attribs));
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_encoder *subenc;
+		struct radeon_encoder_mst *mst_enc;
+
+		radeon_connector = to_radeon_connector(connector);
+		if (!radeon_connector->is_mst_connector)
+			continue;
+
+		if (radeon_connector->mst_port != mst_conn)
+			continue;
+
+		subenc = radeon_connector->mst_encoder;
+		mst_enc = subenc->enc_priv;
+
+		if (!mst_enc->enc_active)
+			continue;
+
+		new_attribs[idx].fe = mst_enc->fe;
+		new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port);
+		idx++;
+	}
+
+	for (i = 0; i < idx; i++) {
+		if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe ||
+		    new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) {
+			radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots);
+			mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe;
+			mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots;
+		}
+	}
+
+	for (i = idx; i < mst_conn->enabled_attribs; i++) {
+		radeon_dp_mst_set_stream_attrib(primary, i, 0, 0);
+		mst_conn->cur_stream_attribs[i].fe = 0;
+		mst_conn->cur_stream_attribs[i].slots = 0;
+	}
+	mst_conn->enabled_attribs = idx;
+	return 0;
+}
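+
+/*
+ * Update pattern example (illustrative): if three streams were active and
+ * one is torn down, the first loop rewrites only the entries whose
+ * fe/slots assignment actually changed, and the second loop clears the
+ * now stale third entry by programming fe = 0, slots = 0 before
+ * enabled_attribs drops to 2.
+ */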
+
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
+{
+	struct drm_device *dev = mst->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_mst *mst_enc = mst->enc_priv;
+	uint32_t val, temp;
+	uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+	int retries = 0;
+	uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+	uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - drm_int2fixp(x)) << 26);
+
+	val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
+
+	WREG32(NI_DP_MSE_RATE_CNTL + offset, val);
+
+	do {
+		temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+		udelay(10);
+	} while ((temp & 0x1) && (retries++ < 10000));
+
+	if (retries >= 10000)
+		DRM_ERROR("timed out wait for rate cntl %d\n", mst_enc->fe);
+	return 0;
+}
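+
+/*
+ * Rate encoding example: avg_time_slots_per_mtp is a 32.32 fixed-point
+ * value split into an integer part X and a 26-bit fraction Y.  For a
+ * hypothetical 3.5 time slots per MTP, x = 3 and
+ * y = ceil(0.5 * 2^26) = 0x2000000, which is what
+ * NI_DP_MSE_RATE_X()/NI_DP_MSE_RATE_Y() pack into the register above.
+ */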
+
+static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector *master = radeon_connector->mst_port;
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port);
+	radeon_connector->edid = edid;
+	DRM_DEBUG_KMS("edid retrieved %p\n", edid);
+	if (radeon_connector->edid) {
+		drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+		return ret;
+	}
+	drm_connector_update_edid_property(&radeon_connector->base, NULL);
+
+	return ret;
+}
+
+static int radeon_dp_mst_get_modes(struct drm_connector *connector)
+{
+	return radeon_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+radeon_dp_mst_mode_valid(struct drm_connector *connector,
+			struct drm_display_mode *mode)
+{
+	/* TODO - validate mode against available PBN for link */
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+radeon_mst_best_encoder(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	return &radeon_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
+	.get_modes = radeon_dp_mst_get_modes,
+	.mode_valid = radeon_dp_mst_mode_valid,
+	.best_encoder = radeon_mst_best_encoder,
+};
+
+static enum drm_connector_status
+radeon_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector *master = radeon_connector->mst_port;
+
+	return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
+}
+
+static void
+radeon_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder;
+
+	drm_encoder_cleanup(&radeon_encoder->base);
+	kfree(radeon_encoder);
+	drm_connector_cleanup(connector);
+	kfree(radeon_connector);
+}
+
+static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_mst_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_dp_mst_connector_destroy,
+};
+
+static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+							 struct drm_dp_mst_port *port,
+							 const char *pathprop)
+{
+	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct radeon_connector *radeon_connector;
+	struct drm_connector *connector;
+
+	radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL);
+	if (!radeon_connector)
+		return NULL;
+
+	radeon_connector->is_mst_connector = true;
+	connector = &radeon_connector->base;
+	radeon_connector->port = port;
+	radeon_connector->mst_port = master;
+	DRM_DEBUG_KMS("\n");
+
+	drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+	drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs);
+	radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
+
+	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+	drm_connector_set_path_property(connector, pathprop);
+
+	return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_fb_add_connector(rdev, connector);
+
+	drm_connector_register(connector);
+}
+
+static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+					    struct drm_connector *connector)
+{
+	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	drm_connector_unregister(connector);
+	radeon_fb_remove_connector(rdev, connector);
+	drm_connector_cleanup(connector);
+
+	kfree(connector);
+	DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
+	.add_connector = radeon_dp_add_mst_connector,
+	.register_connector = radeon_dp_register_mst_connector,
+	.destroy_connector = radeon_dp_destroy_mst_connector,
+	.hotplug = radeon_dp_mst_hotplug,
+};
+
+static struct radeon_connector *
+radeon_mst_find_connector(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (!connector->encoder)
+			continue;
+		if (!radeon_connector->is_mst_connector)
+			continue;
+
+		DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder);
+		if (connector->encoder == encoder)
+			return radeon_connector;
+	}
+	return NULL;
+}
+
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder);
+	struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+	struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base);
+	int dp_clock;
+	struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+	if (radeon_connector) {
+		radeon_connector->pixelclock_for_modeset = mode->clock;
+		if (radeon_connector->base.display_info.bpc)
+			radeon_crtc->bpc = radeon_connector->base.display_info.bpc;
+		else
+			radeon_crtc->bpc = 8;
+	}
+
+	DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock);
+	dp_clock = dig_connector->dp_clock;
+	radeon_crtc->ss_enabled =
+		radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+						 ASIC_INTERNAL_SS_ON_DP,
+						 dp_clock);
+}
+
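+/* Power an MST stream up or down.  The first active link brings up the
+ * primary DIG encoder/transmitter and link trains; every stream then
+ * allocates a VCPI, updates the payload table and enables video.  The
+ * off path tears the same state down in reverse.
+ */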
+static void
+radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder, *primary;
+	struct radeon_encoder_mst *mst_enc;
+	struct radeon_encoder_atom_dig *dig_enc;
+	struct radeon_connector *radeon_connector;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	int ret, slots;
+	s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
+
+	if (!ASIC_IS_DCE5(rdev)) {
+		DRM_ERROR("got mst dpms on non-DCE5\n");
+		return;
+	}
+
+	radeon_connector = radeon_mst_find_connector(encoder);
+	if (!radeon_connector)
+		return;
+
+	radeon_encoder = to_radeon_encoder(encoder);
+
+	mst_enc = radeon_encoder->enc_priv;
+
+	primary = mst_enc->primary;
+
+	dig_enc = primary->enc_priv;
+
+	crtc = encoder->crtc;
+	DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		dig_enc->active_mst_links++;
+
+		radeon_crtc = to_radeon_crtc(crtc);
+
+		if (dig_enc->active_mst_links == 1) {
+			mst_enc->fe = dig_enc->dig_encoder;
+			mst_enc->fe_from_be = true;
+			atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+
+			atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0);
+			atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE,
+							0, 0, dig_enc->dig_encoder);
+
+			if (radeon_dp_needs_link_train(mst_enc->connector) ||
+			    dig_enc->active_mst_links == 1) {
+				radeon_dp_link_train(&primary->base, &mst_enc->connector->base);
+			}
+
+		} else {
+			mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id);
+			if (mst_enc->fe == -1)
+				DRM_ERROR("failed to get frontend for dig encoder\n");
+			mst_enc->fe_from_be = false;
+			atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+		}
+
+		DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
+			      dig_enc->linkb, radeon_crtc->crtc_id);
+
+		slots = drm_dp_find_vcpi_slots(&radeon_connector->mst_port->mst_mgr,
+					       mst_enc->pbn);
+		ret = drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
+					       radeon_connector->port,
+					       mst_enc->pbn, slots);
+		ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+		radeon_dp_mst_set_be_cntl(primary, mst_enc,
+					  radeon_connector->mst_port->hpd.hpd, true);
+
+		mst_enc->enc_active = true;
+		radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+
+		fixed_pbn = drm_int2fixp(mst_enc->pbn);
+		fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+		avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+		radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
+
+		atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
+					    mst_enc->fe);
+		ret = drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+
+		ret = drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links);
+
+		if (!mst_enc->enc_active)
+			return;
+
+		drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+		ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+		drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+		/* and this can also fail */
+		drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+		drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+
+		mst_enc->enc_active = false;
+		radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+
+		radeon_dp_mst_set_be_cntl(primary, mst_enc,
+					  radeon_connector->mst_port->hpd.hpd, false);
+		atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0,
+					    mst_enc->fe);
+
+		if (!mst_enc->fe_from_be)
+			radeon_atom_release_dig_encoder(rdev, mst_enc->fe);
+
+		mst_enc->fe_from_be = false;
+		dig_enc->active_mst_links--;
+		if (dig_enc->active_mst_links == 0) {
+			/* drop link */
+		}
+
+		break;
+	}
+}
+
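+/* Compute the PBN this mode needs (24 bpp assumed) and refresh the lane
+ * count and link rate from the port's DPCD.
+ */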
+static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder_mst *mst_enc;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector_atom_dig *dig_connector;
+	int bpp = 24;
+
+	mst_enc = radeon_encoder->enc_priv;
+
+	mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+	mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
+	DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+		      mst_enc->primary->active_device, mst_enc->primary->devices,
+		      mst_enc->connector->devices, mst_enc->primary->base.encoder_type);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+	dig_connector = mst_enc->connector->con_priv;
+	dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+	dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+	DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+		      dig_connector->dp_lane_count, dig_connector->dp_clock);
+	return true;
+}
+
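+/* Pre-modeset hook: make sure the stream is off and that the primary
+ * encoder has a DIG frontend picked for the MST link.
+ */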
+static void radeon_mst_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_connector *radeon_connector;
+	struct radeon_encoder *radeon_encoder, *primary;
+	struct radeon_encoder_mst *mst_enc;
+	struct radeon_encoder_atom_dig *dig_enc;
+
+	radeon_connector = radeon_mst_find_connector(encoder);
+	if (!radeon_connector) {
+		DRM_DEBUG_KMS("failed to find connector %p\n", encoder);
+		return;
+	}
+	radeon_encoder = to_radeon_encoder(encoder);
+
+	radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	mst_enc = radeon_encoder->enc_priv;
+
+	primary = mst_enc->primary;
+
+	dig_enc = primary->enc_priv;
+
+	mst_enc->port = radeon_connector->port;
+
+	if (dig_enc->dig_encoder == -1) {
+		dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1);
+		primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder);
+		atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder);
+	}
+	DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset);
+}
+
+static void
+radeon_mst_encoder_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_mst_encoder_commit(struct drm_encoder *encoder)
+{
+	radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	DRM_DEBUG_KMS("\n");
+}
+
+static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
+	.dpms = radeon_mst_encoder_dpms,
+	.mode_fixup = radeon_mst_mode_fixup,
+	.prepare = radeon_mst_encoder_prepare,
+	.mode_set = radeon_mst_encoder_mode_set,
+	.commit = radeon_mst_encoder_commit,
+};
+
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = {
+	.destroy = radeon_dp_mst_encoder_destroy,
+};
+
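+/* Allocate a software-only encoder for an MST stream; it borrows the
+ * master connector's real (primary) encoder for the actual hardware.
+ */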
+static struct radeon_encoder *
+radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_mst *mst_enc;
+	struct drm_encoder *encoder;
+	const struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private;
+	struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base);
+
+	DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+	radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return NULL;
+
+	radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL);
+	if (!radeon_encoder->enc_priv) {
+		kfree(radeon_encoder);
+		return NULL;
+	}
+	encoder = &radeon_encoder->base;
+	switch (rdev->num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
+			 DRM_MODE_ENCODER_DPMST, NULL);
+	drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
+
+	mst_enc = radeon_encoder->enc_priv;
+	mst_enc->connector = connector;
+	mst_enc->primary = to_radeon_encoder(enc_master);
+	radeon_encoder->is_mst_encoder = true;
+	return radeon_encoder;
+}
+
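+/* One-time setup of the MST topology manager for a DP connector
+ * (16-byte max DPCD transactions, up to 6 payload streams).
+ */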
+int
+radeon_dp_mst_init(struct radeon_connector *radeon_connector)
+{
+	struct drm_device *dev = radeon_connector->base.dev;
+
+	if (!radeon_connector->ddc_bus->has_aux)
+		return 0;
+
+	radeon_connector->mst_mgr.cbs = &mst_cbs;
+	return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
+					    &radeon_connector->ddc_bus->aux, 16, 6,
+					    radeon_connector->base.base.id);
+}
+
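+/* Check whether the sink speaks MST (DCE5+, DPCD 1.2+, MSTM_CAP set)
+ * and switch the topology manager into or out of MST mode to match.
+ */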
+int
+radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret;
+	u8 msg[1];
+
+	if (!radeon_mst)
+		return 0;
+
+	if (!ASIC_IS_DCE5(rdev))
+		return 0;
+
+	if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
+		return 0;
+
+	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg,
+			       1);
+	if (ret == 1) {
+		if (msg[0] & DP_MST_CAP) {
+			DRM_DEBUG_KMS("Sink is MST capable\n");
+			dig_connector->is_mst = true;
+		} else {
+			DRM_DEBUG_KMS("Sink is not MST capable\n");
+			dig_connector->is_mst = false;
+		}
+	}
+	drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+					dig_connector->is_mst);
+	return dig_connector->is_mst;
+}
+
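+/* Short-pulse HPD handling for MST: read the ESI vector, let the
+ * topology manager process the IRQ, ack the handled bits and re-read
+ * until no more events are pending.
+ */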
+int
+radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	int retry;
+
+	if (dig_connector->is_mst) {
+		u8 esi[16] = { 0 };
+		int dret;
+		int ret = 0;
+		bool handled;
+
+		dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+				       DP_SINK_COUNT_ESI, esi, 8);
+go_again:
+		if (dret == 8) {
+			DRM_DEBUG_KMS("got esi %3ph\n", esi);
+			ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
+
+			if (handled) {
+				for (retry = 0; retry < 3; retry++) {
+					int wret;
+					wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux,
+								 DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+					if (wret == 3)
+						break;
+				}
+
+				dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+							DP_SINK_COUNT_ESI, esi, 8);
+				if (dret == 8) {
+					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
+					goto go_again;
+				}
+			} else
+				ret = 0;
+
+			return ret;
+		} else {
+			DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret);
+			dig_connector->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+							dig_connector->is_mst);
+			/* send a hotplug event */
+		}
+	}
+	return -EINVAL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_mst_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		radeon_connector = to_radeon_connector(connector);
+		dig_connector = radeon_connector->con_priv;
+		if (radeon_connector->is_mst_connector)
+			continue;
+		if (!dig_connector->is_mst)
+			continue;
+		drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr);
+
+		for (i = 0; i < radeon_connector->enabled_attribs; i++)
+			seq_printf(m, "attrib %d: %d %d\n", i,
+				   radeon_connector->cur_stream_attribs[i].fe,
+				   radeon_connector->cur_stream_attribs[i].slots);
+	}
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_mst_list[] = {
+	{"radeon_mst_info", &radeon_debugfs_mst_info, 0, NULL},
+};
+#endif
+
+int radeon_mst_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_mst_list, 1);
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
new file mode 100644
index 0000000..2a7977a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -0,0 +1,651 @@
+/**
+ * \file radeon_drv.c
+ * ATI Radeon driver
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include <drm/drm_pciids.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/compat.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_fb_helper.h>
+
+#include <drm/drm_crtc_helper.h>
+
+/*
+ * KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ * - 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * - 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
+ * - 2.10.0 - fusion 2D tiling
+ * - 2.11.0 - backend map, initial compute support for the CS checker
+ * - 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * - 2.13.0 - virtual memory support, streamout
+ * - 2.14.0 - add evergreen tiling information
+ * - 2.15.0 - add max_pipes query
+ * - 2.16.0 - fix evergreen 2D tiled surface calculation
+ * - 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * - 2.18.0 - r600-eg: allow "invalid" DB formats
+ * - 2.19.0 - r600-eg: MSAA textures
+ * - 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ * - 2.21.0 - r600-r700: FMASK and CMASK
+ * - 2.22.0 - r600 only: RESOLVE_BOX allowed
+ * - 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ * - 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * - 2.25.0 - eg+: new info request for num SE and num SH
+ * - 2.26.0 - r600-eg: fix htile size computation
+ * - 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * - 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * - 2.29.0 - R500 FP16 color clear registers
+ * - 2.30.0 - fix for FMASK texturing
+ * - 2.31.0 - Add fastfb support for rs690
+ * - 2.32.0 - new info request for rings working
+ * - 2.33.0 - Add SI tiling mode array query
+ * - 2.34.0 - Add CIK tiling mode array query
+ * - 2.35.0 - Add CIK macrotile mode array query
+ * - 2.36.0 - Fix CIK DCE tiling setup
+ * - 2.37.0 - allow GS ring setup on r6xx/r7xx
+ * - 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
+ *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
+ * - 2.39.0 - Add INFO query for number of active CUs
+ * - 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ *            CS to GPU on >= r600
+ * - 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
+ * - 2.42.0 - Add VCE/VUI (Video Usability Information) support
+ * - 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
+ * - 2.44.0 - SET_APPEND_CNT packet3 support
+ * - 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ * - 2.46.0 - Add PFP_SYNC_ME support on evergreen
+ * - 2.47.0 - Add UVD_NO_OP register support
+ * - 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ * - 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
+ * - 2.50.0 - Allows unaligned shader loads on CIK. (needed by OpenGL)
+ */
+#define KMS_DRIVER_MAJOR	2
+#define KMS_DRIVER_MINOR	50
+#define KMS_DRIVER_PATCHLEVEL	0
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+void radeon_driver_unload_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+		       bool fbcon, bool freeze);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
+				      unsigned int flags, int *vpos, int *hpos,
+				      ktime_t *stime, ktime_t *etime,
+				      const struct drm_display_mode *mode);
+extern bool radeon_is_px(struct drm_device *dev);
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+							struct dma_buf_attachment *,
+							struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
+int radeon_no_wb;
+int radeon_modeset = -1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+#ifdef __powerpc__
+/* Default to PCI on PowerPC (fdo #95017) */
+int radeon_agpmode = -1;
+#else
+int radeon_agpmode = 0;
+#endif
+int radeon_vram_limit = 0;
+int radeon_gart_size = -1; /* auto */
+int radeon_benchmarking = 0;
+int radeon_testing = 0;
+int radeon_connector_table = 0;
+int radeon_tv = 1;
+int radeon_audio = -1;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = -1;
+int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
+int radeon_dpm = -1;
+int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
+int radeon_hard_reset = 0;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
+int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;
+int radeon_bapm = -1;
+int radeon_backlight = -1;
+int radeon_auxch = -1;
+int radeon_mst = 0;
+int radeon_uvd = 1;
+int radeon_vce = 1;
+
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+
+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+module_param_named(tv, radeon_tv, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
+
+MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(dpm, radeon_dpm, int, 0444);
+
+MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(aspm, radeon_aspm, int, 0444);
+
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
+module_param_named(vm_size, radeon_vm_size, int, 0444);
+
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
+module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
+
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, radeon_bapm, int, 0444);
+
+MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(backlight, radeon_backlight, int, 0444);
+
+MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(auxch, radeon_auxch, int, 0444);
+
+MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
+module_param_named(mst, radeon_mst, int, 0444);
+
+MODULE_PARM_DESC(uvd, "UVD support (1 = enable, 0 = disable)");
+module_param_named(uvd, radeon_uvd, int, 0444);
+
+MODULE_PARM_DESC(vce, "VCE support (1 = enable, 0 = disable)");
+module_param_named(vce, radeon_vce, int, 0444);
+
+int radeon_si_support = 1;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
+module_param_named(si_support, radeon_si_support, int, 0444);
+
+int radeon_cik_support = 1;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
+module_param_named(cik_support, radeon_cik_support, int, 0444);
+
+static struct pci_device_id pciidlist[] = {
+	radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_driver kms_driver;
+
+bool radeon_device_is_virtual(void);
+
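+/* Evict firmware framebuffers (such as offb) that claim this device's
+ * aperture before the real driver takes over.
+ */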
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+	kfree(ap);
+
+	return 0;
+}
+
+static int radeon_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret;
+
+	if (vga_switcheroo_client_probe_defer(pdev))
+		return -EPROBE_DEFER;
+
+	/* Get rid of things like offb */
+	ret = radeon_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
+
+	return drm_get_pci_dev(pdev, ent, &kms_driver);
+}
+
+static void
+radeon_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	/* if we are running in a VM, make sure the device
+	 * is torn down properly on reboot/shutdown
+	 */
+	if (radeon_device_is_virtual())
+		radeon_pci_remove(pdev);
+}
+
+static int radeon_pmops_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, true, true, false);
+}
+
+static int radeon_pmops_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	/* GPU comes up enabled by the bios on resume */
+	if (radeon_is_px(drm_dev)) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
+	return radeon_resume_kms(drm_dev, true, true);
+}
+
+static int radeon_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, false, true, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, false, true);
+}
+
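+/* Runtime suspend only applies to PX (PowerXpress) laptops; the dGPU is
+ * put into D3cold on ATPX hybrid systems, D3hot otherwise, unless ATPX
+ * itself controls dGPU power.
+ */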
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (!radeon_is_px(drm_dev)) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	drm_kms_helper_poll_disable(drm_dev);
+
+	ret = radeon_suspend_kms(drm_dev, false, false, false);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_ignore_hotplug(pdev);
+	if (radeon_is_atpx_hybrid())
+		pci_set_power_state(pdev, PCI_D3cold);
+	else if (!radeon_has_atpx_dgpu_power_cntl())
+		pci_set_power_state(pdev, PCI_D3hot);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+	return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (!radeon_is_px(drm_dev))
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+	if (radeon_is_atpx_hybrid() ||
+	    !radeon_has_atpx_dgpu_power_cntl())
+		pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = radeon_resume_kms(drm_dev, false, false);
+	drm_kms_helper_poll_enable(drm_dev);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	return 0;
+}
+
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_crtc *crtc;
+
+	if (!radeon_is_px(drm_dev)) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		if (crtc->enabled) {
+			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	return 1;
+}
+
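+/* ioctl wrapper that wakes a runtime-suspended GPU before dispatching
+ * to the core DRM ioctl handler, then re-arms autosuspend.
+ */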
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	long ret;
+
+	dev = file_priv->minor->dev;
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	ret = radeon_drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops radeon_pm_ops = {
+	.suspend = radeon_pmops_suspend,
+	.resume = radeon_pmops_resume,
+	.freeze = radeon_pmops_freeze,
+	.thaw = radeon_pmops_thaw,
+	.poweroff = radeon_pmops_freeze,
+	.restore = radeon_pmops_resume,
+	.runtime_suspend = radeon_pmops_runtime_suspend,
+	.runtime_resume = radeon_pmops_runtime_resume,
+	.runtime_idle = radeon_pmops_runtime_idle,
+};
+
+static const struct file_operations radeon_driver_kms_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = radeon_drm_ioctl,
+	.mmap = radeon_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
+static bool
+radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
+				 bool in_vblank_irq, int *vpos, int *hpos,
+				 ktime_t *stime, ktime_t *etime,
+				 const struct drm_display_mode *mode)
+{
+	return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+					  stime, etime, mode);
+}
+
+static struct drm_driver kms_driver = {
+	.driver_features =
+	    DRIVER_USE_AGP |
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+	    DRIVER_PRIME | DRIVER_RENDER,
+	.load = radeon_driver_load_kms,
+	.open = radeon_driver_open_kms,
+	.postclose = radeon_driver_postclose_kms,
+	.lastclose = radeon_driver_lastclose_kms,
+	.unload = radeon_driver_unload_kms,
+	.get_vblank_counter = radeon_get_vblank_counter_kms,
+	.enable_vblank = radeon_enable_vblank_kms,
+	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+	.get_scanout_position = radeon_get_crtc_scanout_position,
+	.irq_preinstall = radeon_driver_irq_preinstall_kms,
+	.irq_postinstall = radeon_driver_irq_postinstall_kms,
+	.irq_uninstall = radeon_driver_irq_uninstall_kms,
+	.irq_handler = radeon_driver_irq_handler_kms,
+	.ioctls = radeon_ioctls_kms,
+	.gem_free_object_unlocked = radeon_gem_object_free,
+	.gem_open_object = radeon_gem_object_open,
+	.gem_close_object = radeon_gem_object_close,
+	.dumb_create = radeon_mode_dumb_create,
+	.dumb_map_offset = radeon_mode_dumb_mmap,
+	.fops = &radeon_driver_kms_fops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = radeon_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = radeon_gem_prime_pin,
+	.gem_prime_unpin = radeon_gem_prime_unpin,
+	.gem_prime_res_obj = radeon_gem_prime_res_obj,
+	.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+	.gem_prime_vmap = radeon_gem_prime_vmap,
+	.gem_prime_vunmap = radeon_gem_prime_vunmap,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = KMS_DRIVER_MAJOR,
+	.minor = KMS_DRIVER_MINOR,
+	.patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver radeon_kms_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = radeon_pci_probe,
+	.remove = radeon_pci_remove,
+	.shutdown = radeon_pci_shutdown,
+	.driver.pm = &radeon_pm_ops,
+};
+
+static int __init radeon_init(void)
+{
+	if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+		radeon_modeset = 0;
+	}
+	/* set to modesetting by default if not nomodeset */
+	if (radeon_modeset == -1)
+		radeon_modeset = 1;
+
+	if (radeon_modeset == 1) {
+		DRM_INFO("radeon kernel modesetting enabled.\n");
+		driver = &kms_driver;
+		pdriver = &radeon_kms_pci_driver;
+		driver->driver_features |= DRIVER_MODESET;
+		driver->num_ioctls = radeon_max_kms_ioctl;
+		radeon_register_atpx_handler();
+
+	} else {
+		DRM_ERROR("No UMS support in radeon module!\n");
+		return -EINVAL;
+	}
+
+	return pci_register_driver(pdriver);
+}
+
+static void __exit radeon_exit(void)
+{
+	pci_unregister_driver(pdriver);
+	radeon_unregister_atpx_handler();
+}
+
+module_init(radeon_init);
+module_exit(radeon_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
new file mode 100644
index 0000000..afef2d9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -0,0 +1,122 @@
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <drm/drm_legacy.h>
+
+#include <drm/ati_pcigart.h>
+#include "radeon_family.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME		"radeon"
+#define DRIVER_DESC		"ATI Radeon"
+#define DRIVER_DATE		"20080528"
+
+/* Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ *     - Add stencil capability to clear ioctl (gareth, keith)
+ *     - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ *     - Add support for new radeon packets (keith)
+ *     - Add getparam ioctl (keith)
+ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ *     - Add r200 function to init ioctl
+ *     - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ *       Add irq handler (won't be turned on unless X server knows to)
+ *       Add irq ioctls and irq_active getparam.
+ *       Add wait command for cmdbuf ioctl
+ *       Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ *       Add 'GET' queries for starting additional clients on different VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ *       Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ *       clients use to tell the DRM where they think the framebuffer is
+ *       located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ *       and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ *       (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ *     - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ *     - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ *     - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
+ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ *       new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
+ * 1.29- R500 3D cmd buffer support
+ * 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
+ * 1.34- fix evergreen/cayman GS register
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		34
+#define DRIVER_PATCHLEVEL	0
+
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg);
+
+#endif				/* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 0000000..c341fb2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+			     struct drm_connector *drm_connector);
+extern void
+radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+			   struct drm_connector *drm_connector);
+
+
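+/* Build the possible_clones bitmask for an encoder: pre-R600 encoders
+ * may clone one another, except where LVDS/TV or DVO is involved.
+ */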
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
+uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ret = 0;
+
+	switch (supported_device) {
+	case ATOM_DEVICE_CRT1_SUPPORT:
+	case ATOM_DEVICE_TV1_SUPPORT:
+	case ATOM_DEVICE_TV2_SUPPORT:
+	case ATOM_DEVICE_CRT2_SUPPORT:
+	case ATOM_DEVICE_CV_SUPPORT:
+		switch (dac) {
+		case 1: /* dac a */
+			if ((rdev->family == CHIP_RS300) ||
+			    (rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480))
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			else if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
+			break;
+		case 2: /* dac b */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
+			else {
+				/*if (rdev->family == CHIP_R200)
+				  ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+				  else*/
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			}
+			break;
+		case 3: /* external dac */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+			break;
+		}
+		break;
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		if ((rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480))
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_LCD2_SUPPORT:
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		break;
+	}
+
+	return ret;
+}
+
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+					 struct drm_connector *connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	bool use_bl = false;
+
+	if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+		return;
+
+	if (radeon_backlight == 0) {
+		return;
+	} else if (radeon_backlight == 1) {
+		use_bl = true;
+	} else if (radeon_backlight == -1) {
+		/* Quirks */
+		/* Amilo Xi 2550 only works with acpi bl */
+		if ((rdev->pdev->device == 0x9583) &&
+		    (rdev->pdev->subsystem_vendor == 0x1734) &&
+		    (rdev->pdev->subsystem_device == 0x1107))
+			use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
+		/* disable native backlight control on older asics */
+		else if (rdev->family < CHIP_R600)
+			use_bl = false;
+#endif
+		else
+			use_bl = true;
+	}
+
+	if (use_bl) {
+		if (rdev->is_atom_bios)
+			radeon_atom_backlight_init(radeon_encoder, connector);
+		else
+			radeon_legacy_backlight_init(radeon_encoder, connector);
+	}
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* walk the list and link encoders to connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->devices & radeon_connector->devices) {
+				drm_connector_attach_encoder(connector, encoder);
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+					radeon_encoder_add_backlight(radeon_encoder, connector);
+			}
+		}
+	}
+}
+
+void radeon_encoder_set_active_device(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
+			DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+				  radeon_encoder->active_device, radeon_encoder->devices,
+				  radeon_connector->devices, encoder->encoder_type);
+		}
+	}
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->is_mst_encoder) {
+			struct radeon_encoder_mst *mst_enc;
+
+			if (!radeon_connector->is_mst_connector)
+				continue;
+
+			mst_enc = radeon_encoder->enc_priv;
+			if (mst_enc->connector == radeon_connector->mst_port)
+				return connector;
+		} else if (radeon_encoder->active_device & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->devices & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *other_encoder;
+	struct radeon_encoder *other_radeon_encoder;
+
+	if (radeon_encoder->is_ext_encoder)
+		return NULL;
+
+	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+		if (other_encoder == encoder)
+			continue;
+		other_radeon_encoder = to_radeon_encoder(other_encoder);
+		if (other_radeon_encoder->is_ext_encoder &&
+		    (radeon_encoder->devices & other_radeon_encoder->devices))
+			return other_encoder;
+	}
+	return NULL;
+}
+
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+{
+	struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
+
+	if (other_encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			return ENCODER_OBJECT_ID_NONE;
+		}
+	}
+	return ENCODER_OBJECT_ID_NONE;
+}
+
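+/* Force the adjusted mode to the panel's native timings; fixed panels
+ * are always driven at their native mode and scaled as needed.
+ */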
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+	unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+	unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+	unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+	unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+	unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+	adjusted_mode->clock = native_mode->clock;
+	adjusted_mode->flags = native_mode->flags;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->hdisplay = native_mode->hdisplay;
+		adjusted_mode->vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->htotal = native_mode->hdisplay + hblank;
+	adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+	adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+	adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+	adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+	adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+		adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+	adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+}
+
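+/* Decide whether a mode needs a dual-link connection: DP and eDP never
+ * do, while DVI/HDMI go dual link above 165 MHz (340 MHz for HDMI 1.3
+ * capable DCE6+ parts).
+ */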
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (!radeon_connector->use_digital)
+			return false;
+		/* HDMI 1.3 supports up to 340 MHz over a single link */
+		if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector)))
+			return pixel_clock > 340000;
+		return pixel_clock > 165000;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		if (radeon_connector->is_mst_connector)
+			return false;
+
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return false;
+		/* HDMI 1.3 supports up to 340 MHz over a single link */
+		if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector)))
+			return pixel_clock > 340000;
+		return pixel_clock > 165000;
+	default:
+		return false;
+	}
+}
+
+bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		return true;
+	default:
+		return false;
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
new file mode 100644
index 0000000..4b7b87f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+/* This file defines the CHIP_ and family flags used in the pciids;
+ * it is shared between kms and non-kms because duplicating it and
+ * having to update both copies would be error prone.
+ */
+#ifndef RADEON_FAMILY_H
+#define RADEON_FAMILY_H
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+	CHIP_R100 = 0,
+	CHIP_RV100,
+	CHIP_RS100,
+	CHIP_RV200,
+	CHIP_RS200,
+	CHIP_R200,
+	CHIP_RV250,
+	CHIP_RS300,
+	CHIP_RV280,
+	CHIP_R300,
+	CHIP_R350,
+	CHIP_RV350,
+	CHIP_RV380,
+	CHIP_R420,
+	CHIP_R423,
+	CHIP_RV410,
+	CHIP_RS400,
+	CHIP_RS480,
+	CHIP_RS600,
+	CHIP_RS690,
+	CHIP_RS740,
+	CHIP_RV515,
+	CHIP_R520,
+	CHIP_RV530,
+	CHIP_RV560,
+	CHIP_RV570,
+	CHIP_R580,
+	CHIP_R600,
+	CHIP_RV610,
+	CHIP_RV630,
+	CHIP_RV670,
+	CHIP_RV620,
+	CHIP_RV635,
+	CHIP_RS780,
+	CHIP_RS880,
+	CHIP_RV770,
+	CHIP_RV730,
+	CHIP_RV710,
+	CHIP_RV740,
+	CHIP_CEDAR,
+	CHIP_REDWOOD,
+	CHIP_JUNIPER,
+	CHIP_CYPRESS,
+	CHIP_HEMLOCK,
+	CHIP_PALM,
+	CHIP_SUMO,
+	CHIP_SUMO2,
+	CHIP_BARTS,
+	CHIP_TURKS,
+	CHIP_CAICOS,
+	CHIP_CAYMAN,
+	CHIP_ARUBA,
+	CHIP_TAHITI,
+	CHIP_PITCAIRN,
+	CHIP_VERDE,
+	CHIP_OLAND,
+	CHIP_HAINAN,
+	CHIP_BONAIRE,
+	CHIP_KAVERI,
+	CHIP_KABINI,
+	CHIP_HAWAII,
+	CHIP_MULLINS,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+	RADEON_FAMILY_MASK = 0x0000ffffUL,
+	RADEON_FLAGS_MASK = 0xffff0000UL,
+	RADEON_IS_MOBILITY = 0x00010000UL,
+	RADEON_IS_IGP = 0x00020000UL,
+	RADEON_SINGLE_CRTC = 0x00040000UL,
+	RADEON_IS_AGP = 0x00080000UL,
+	RADEON_HAS_HIERZ = 0x00100000UL,
+	RADEON_IS_PCIE = 0x00200000UL,
+	RADEON_NEW_MEMMAP = 0x00400000UL,
+	RADEON_IS_PCI = 0x00800000UL,
+	RADEON_IS_IGPGART = 0x01000000UL,
+	RADEON_IS_PX = 0x02000000UL,
+};
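+
+/*
+ * Illustrative decoding of the combined family/flags word (a sketch,
+ * not part of the original header): the low 16 bits select the
+ * radeon_family value and the high 16 bits carry feature flags, so a
+ * flags word from the PCI ID table can be unpacked as:
+ *
+ *	enum radeon_family family = flags & RADEON_FAMILY_MASK;
+ *	bool is_mobility = !!(flags & RADEON_IS_MOBILITY);
+ *	bool is_igp = !!(flags & RADEON_IS_IGP);
+ */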
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 0000000..1179034
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include <drm/drm_fb_helper.h>
+
+#include <linux/vga_switcheroo.h>
+
+/* Object hierarchy:
+ * this contains a helper and a radeon framebuffer;
+ * the helper contains a pointer to the radeon framebuffer baseclass.
+ */
+struct radeon_fbdev {
+	struct drm_fb_helper helper;
+	struct drm_framebuffer fb;
+	struct radeon_device *rdev;
+};
+
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+	int ret = pm_runtime_get_sync(rdev->ddev->dev);
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_mark_last_busy(rdev->ddev->dev);
+		pm_runtime_put_autosuspend(rdev->ddev->dev);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+
+	pm_runtime_mark_last_busy(rdev->ddev->dev);
+	pm_runtime_put_autosuspend(rdev->ddev->dev);
+	return 0;
+}
+
+static struct fb_ops radeonfb_ops = {
+	.owner = THIS_MODULE,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	.fb_open = radeonfb_open,
+	.fb_release = radeonfb_release,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
+};
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
+{
+	int aligned = width;
+	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
+	int pitch_mask = 0;
+
+	switch (cpp) {
+	case 1:
+		pitch_mask = align_large ? 255 : 127;
+		break;
+	case 2:
+		pitch_mask = align_large ? 127 : 31;
+		break;
+	case 3:
+	case 4:
+		pitch_mask = align_large ? 63 : 15;
+		break;
+	}
+
+	aligned += pitch_mask;
+	aligned &= ~pitch_mask;
+	return aligned * cpp;
+}
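+
+/*
+ * Worked example for the alignment above (values assumed for
+ * illustration): on an AVIVO part at 32bpp (cpp = 4) the pitch mask is
+ * 63 pixels, so a 1366 pixel wide mode is padded to 1408 pixels:
+ *
+ *	int pitch = radeon_align_pitch(rdev, 1366, 4, false);
+ *
+ * which returns 1408 * 4 = 5632 bytes.
+ */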
+
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+	int ret;
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (likely(ret == 0)) {
+		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+	drm_gem_object_put_unlocked(gobj);
+}
+
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+					 struct drm_mode_fb_cmd2 *mode_cmd,
+					 struct drm_gem_object **gobj_p)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	bool fb_tiled = false; /* useful for testing */
+	u32 tiling_flags = 0;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	u32 cpp;
+
+	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
+
+	/* need to align pitch with crtc limits */
+	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+						  fb_tiled);
+
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+	ret = radeon_gem_object_create(rdev, aligned_size, 0,
+				       RADEON_GEM_DOMAIN_VRAM,
+				       0, true, &gobj);
+	if (ret) {
+		pr_err("failed to allocate framebuffer (%d bytes)\n", aligned_size);
+		return -ENOMEM;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+
+	if (fb_tiled)
+		tiling_flags = RADEON_TILING_MACRO;
+
+#ifdef __BIG_ENDIAN
+	switch (cpp) {
+	case 4:
+		tiling_flags |= RADEON_TILING_SWAP_32BIT;
+		break;
+	case 2:
+		tiling_flags |= RADEON_TILING_SWAP_16BIT;
+	default:
+		break;
+	}
+#endif
+
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+						 tiling_flags | RADEON_TILING_SURFACE,
+						 mode_cmd->pitches[0]);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	/* Only 27 bit offset for legacy CRTC */
+	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       NULL);
+	if (ret) {
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, NULL);
+	radeon_bo_unreserve(rbo);
+	if (ret) {
+		goto out_unref;
+	}
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	radeonfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int radeonfb_create(struct drm_fb_helper *helper,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_fbdev *rfbdev =
+		container_of(helper, struct radeon_fbdev, helper);
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	int ret;
+	unsigned long tmp;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	/* avivo can't scanout real 24bpp */
+	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+		sizes->surface_bpp = 32;
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon object %d\n", ret);
+		return ret;
+	}
+
+	rbo = gem_to_radeon_bo(gobj);
+
+	/* okay we have an object now allocate the framebuffer */
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto out;
+	}
+
+	info->par = rfbdev;
+
+	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
+	if (ret) {
+		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
+		goto out;
+	}
+
+	fb = &rfbdev->fb;
+
+	/* setup helper */
+	rfbdev->helper.fb = fb;
+
+	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
+
+	strcpy(info->fix.id, "radeondrmfb");
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+
+	info->fbops = &radeonfb_ops;
+
+	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
+	info->fix.smem_start = rdev->mc.aper_base + tmp;
+	info->fix.smem_len = radeon_bo_size(rbo);
+	info->screen_base = rbo->kptr;
+	info->screen_size = radeon_bo_size(rbo);
+
+	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = rdev->mc.aper_size;
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	if (info->screen_base == NULL) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
+	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
+	DRM_INFO("fb depth is %d\n", fb->format->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+	return 0;
+
+out:
+	if (fb && ret) {
+		drm_gem_object_put_unlocked(gobj);
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_cleanup(fb);
+		kfree(fb);
+	}
+	return ret;
+}
+
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+{
+	struct drm_framebuffer *fb = &rfbdev->fb;
+
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+
+	if (fb->obj[0]) {
+		radeonfb_destroy_pinned_object(fb->obj[0]);
+		fb->obj[0] = NULL;
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_cleanup(fb);
+	}
+	drm_fb_helper_fini(&rfbdev->helper);
+
+	return 0;
+}
+
+static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+	.fb_probe = radeonfb_create,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+	struct radeon_fbdev *rfbdev;
+	int bpp_sel = 32;
+	int ret;
+
+	/* don't enable fbdev if no connectors */
+	if (list_empty(&rdev->ddev->mode_config.connector_list))
+		return 0;
+
+	/* select 8 bpp console on 8MB cards, or 16 bpp on RN50 or 32MB */
+	if (rdev->mc.real_vram_size <= (8*1024*1024))
+		bpp_sel = 8;
+	else if (ASIC_IS_RN50(rdev) ||
+		 rdev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 16;
+
+	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+	if (!rfbdev)
+		return -ENOMEM;
+
+	rfbdev->rdev = rdev;
+	rdev->mode_info.rfbdev = rfbdev;
+
+	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+			      &radeon_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+				 RADEONFB_CONN_LIMIT);
+	if (ret)
+		goto free;
+
+	ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+	if (ret)
+		goto fini;
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(rdev->ddev);
+
+	ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+	if (ret)
+		goto fini;
+
+	return 0;
+
+fini:
+	drm_fb_helper_fini(&rfbdev->helper);
+free:
+	kfree(rfbdev);
+	return ret;
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+	if (!rdev->mode_info.rfbdev)
+		return;
+
+	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+	kfree(rdev->mode_info.rfbdev);
+	rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+	if (!rdev->mode_info.rfbdev)
+		return false;
+
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->fb.obj[0]))
+		return true;
+	return false;
+}
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
+
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 0000000..e86f2bd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,1122 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * Fences
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization.  When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.  Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
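+
+/*
+ * Typical usage, shown only as an illustrative sketch (error handling
+ * trimmed; callers hold the ring emission mutex around the emit):
+ *
+ *	struct radeon_fence *fence;
+ *
+ *	radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ *	...submit work and unlock the ring...
+ *	radeon_fence_wait(fence, false);
+ *	radeon_fence_unref(&fence);
+ */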
+
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
+	} else {
+		WREG32(drv->scratch_reg, seq);
+	}
+}
+
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	u32 seq = 0;
+
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
+	} else {
+		seq = RREG32(drv->scratch_reg);
+	}
+	return seq;
+}
+
+/**
+ * radeon_fence_schedule_check - schedule lockup check
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index we should work with
+ *
+ * Queues a delayed work item to check for lockups.
+ */
+static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
+{
+	/*
+	 * Do not reset the timer here with mod_delayed_work,
+	 * this can livelock in an interaction with TTM delayed destroy.
+	 */
+	queue_delayed_work(system_power_efficient_wq,
+			   &rdev->fence_drv[ring].lockup_work,
+			   RADEON_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+		      struct radeon_fence **fence,
+		      int ring)
+{
+	u64 seq;
+
+	/* we are protected by the ring emission mutex */
+	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+	if ((*fence) == NULL) {
+		return -ENOMEM;
+	}
+	(*fence)->rdev = rdev;
+	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
+	(*fence)->ring = ring;
+	(*fence)->is_vm_update = false;
+	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+		       &rdev->fence_queue.lock,
+		       rdev->fence_context + ring,
+		       seq);
+	radeon_fence_ring_emit(rdev, ring, *fence);
+	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
+	radeon_fence_schedule_check(rdev, ring);
+	return 0;
+}
+
+/**
+ * radeon_fence_check_signaled - callback from fence_queue
+ *
+ * This function is called with the fence_queue lock held, which is also
+ * used for the fence locking itself, so the unlocked variants are used
+ * for fence_signal and remove_wait_queue.
+ */
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
+{
+	struct radeon_fence *fence;
+	u64 seq;
+
+	fence = container_of(wait, struct radeon_fence, fence_wake);
+
+	/*
+	 * We cannot use radeon_fence_process here because we're already
+	 * in the waitqueue, in a call from wake_up_all.
+	 */
+	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
+	if (seq >= fence->seq) {
+		int ret = dma_fence_signal_locked(&fence->base);
+
+		if (!ret)
+			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
+
+		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
+		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
+		dma_fence_put(&fence->base);
+	} else
+		DMA_FENCE_TRACE(&fence->base, "pending\n");
+	return 0;
+}
+
+/**
+ * radeon_fence_activity - check for fence activity
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and calculates the last
+ * signaled fence value. Returns true if activity occurred
+ * on the ring and the fence_queue should be woken up.
+ */
+static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process needs to be interrupted by another process, and that
+	 * other process needs to update last_seq between the atomic read
+	 * and the xchg of the current process.
+	 *
+	 * Moreover, for this to become an infinite loop, new fences need
+	 * to be signaled continuously, i.e. radeon_fence_read needs to
+	 * return a different value each time for both the currently
+	 * polling process and the other process that updates last_seq
+	 * between the atomic read and xchg of the current process. And
+	 * the value the other process sets as last_seq must be higher
+	 * than the seq value we just read, which means the current
+	 * process must be interrupted after radeon_fence_read and before
+	 * the atomic xchg.
+	 *
+	 * To be even safer, we count the number of times we loop and
+	 * bail out after 10 iterations, accepting that we might have
+	 * temporarily set last_seq not to the true last signaled seq
+	 * but to an older one.
+	 */
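+	/*
+	 * Worked example of the 32 to 64 bit extension below (numbers
+	 * assumed for illustration): with last_seq = 0x1fffffff0 and a
+	 * hardware read of 0x00000005, OR-ing in the upper bits of
+	 * last_seq gives 0x100000005, which is below last_seq, so the
+	 * upper bits of last_emitted (say 0x200000010) are used instead
+	 * and the extended value becomes 0x200000005.
+	 */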
+	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+	do {
+		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
+		seq = radeon_fence_read(rdev, ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop again we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped too many times; leave, accepting
+			 * that we might have set an older fence seq
+			 * than the current real last seq signaled by
+			 * the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+	if (seq < last_emitted)
+		radeon_fence_schedule_check(rdev, ring);
+
+	return wake;
+}
+
+/**
+ * radeon_fence_check_lockup - check for hardware lockup
+ *
+ * @work: delayed work item
+ *
+ * Checks for fence activity and, if there is none, probes
+ * the hardware to see if a lockup occurred.
+ */
+static void radeon_fence_check_lockup(struct work_struct *work)
+{
+	struct radeon_fence_driver *fence_drv;
+	struct radeon_device *rdev;
+	int ring;
+
+	fence_drv = container_of(work, struct radeon_fence_driver,
+				 lockup_work.work);
+	rdev = fence_drv->rdev;
+	ring = fence_drv - &rdev->fence_drv[0];
+
+	if (!down_read_trylock(&rdev->exclusive_lock)) {
+		/* just reschedule the check if a reset is going on */
+		radeon_fence_schedule_check(rdev, ring);
+		return;
+	}
+
+	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
+		unsigned long irqflags;
+
+		fence_drv->delayed_irq = false;
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+
+	if (radeon_fence_activity(rdev, ring))
+		wake_up_all(&rdev->fence_queue);
+	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+		/* good news we believe it's a lockup */
+		dev_warn(rdev->dev, "GPU lockup (current fence id "
+			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
+			 (uint64_t)atomic64_read(&fence_drv->last_seq),
+			 fence_drv->sync_seq[ring], ring);
+
+		/* remember that we need a reset */
+		rdev->needs_reset = true;
+		wake_up_all(&rdev->fence_queue);
+	}
+	up_read(&rdev->exclusive_lock);
+}
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	if (radeon_fence_activity(rdev, ring))
+		wake_up_all(&rdev->fence_queue);
+}
+
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).  Helper function for
+ * radeon_fence_signaled().
+ */
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+				      u64 seq, unsigned ring)
+{
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	/* poll new last sequence at least once */
+	radeon_fence_process(rdev, ring);
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	return false;
+}
+
+static bool radeon_fence_is_signaled(struct dma_fence *f)
+{
+	struct radeon_fence *fence = to_radeon_fence(f);
+	struct radeon_device *rdev = fence->rdev;
+	unsigned ring = fence->ring;
+	u64 seq = fence->seq;
+
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+
+	if (down_read_trylock(&rdev->exclusive_lock)) {
+		radeon_fence_process(rdev, ring);
+		up_read(&rdev->exclusive_lock);
+
+		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
+{
+	struct radeon_fence *fence = to_radeon_fence(f);
+	struct radeon_device *rdev = fence->rdev;
+
+	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
+		return false;
+
+	if (down_read_trylock(&rdev->exclusive_lock)) {
+		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+
+		if (radeon_fence_activity(rdev, fence->ring))
+			wake_up_all_locked(&rdev->fence_queue);
+
+		/* did fence get signaled after we enabled the sw irq? */
+		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
+			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+			up_read(&rdev->exclusive_lock);
+			return false;
+		}
+
+		up_read(&rdev->exclusive_lock);
+	} else {
+		/* we're probably in a lockup, let's not fiddle too much */
+		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
+			rdev->fence_drv[fence->ring].delayed_irq = true;
+		radeon_fence_schedule_check(rdev, fence->ring);
+	}
+
+	fence->fence_wake.flags = 0;
+	fence->fence_wake.private = NULL;
+	fence->fence_wake.func = radeon_fence_check_signaled;
+	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
+	dma_fence_get(f);
+
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+	return true;
+}
+
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+	if (!fence)
+		return true;
+
+	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+		int ret;
+
+		ret = dma_fence_signal(&fence->base);
+		if (!ret)
+			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait_*().
+ * Returns remaining time if the sequence number has passed, 0 when
+ * the wait timed out, or an error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected.
+ */
+static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
+					  u64 *target_seq, bool intr,
+					  long timeout)
+{
+	long r;
+	int i;
+
+	if (radeon_fence_any_seq_signaled(rdev, target_seq))
+		return timeout;
+
+	/* enable IRQs and tracing */
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i])
+			continue;
+
+		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+		radeon_irq_kms_sw_irq_get(rdev, i);
+	}
+
+	if (intr) {
+		r = wait_event_interruptible_timeout(rdev->fence_queue, (
+			radeon_fence_any_seq_signaled(rdev, target_seq)
+			 || rdev->needs_reset), timeout);
+	} else {
+		r = wait_event_timeout(rdev->fence_queue, (
+			radeon_fence_any_seq_signaled(rdev, target_seq)
+			 || rdev->needs_reset), timeout);
+	}
+
+	if (rdev->needs_reset)
+		r = -EDEADLK;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i])
+			continue;
+
+		radeon_irq_kms_sw_irq_put(rdev, i);
+		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+	}
+
+	return r;
+}
+
+/**
+ * radeon_fence_wait_timeout - wait for a fence to signal with timeout
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns remaining time if the sequence number has passed, 0 when
+ * the wait timed out, or an error for all other cases.
+ */
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	long r;
+	int r_sig;
+
+	/*
+	 * This function should not be called on !radeon fences.
+	 * If this is the case, it would mean this function can
+	 * also be called on radeon fences belonging to another card.
+	 * exclusive_lock is not held in that case.
+	 */
+	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
+		return dma_fence_wait(&fence->base, intr);
+
+	seq[fence->ring] = fence->seq;
+	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
+	if (r <= 0) {
+		return r;
+	}
+
+	r_sig = dma_fence_signal(&fence->base);
+	if (!r_sig)
+		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+	return r;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r > 0) {
+		return 0;
+	} else {
+		return r;
+	}
+}
+
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics).  Fence
+ * array is indexed by ring id.  @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i, num_rings = 0;
+	long r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		seq[i] = fences[i]->seq;
+		++num_rings;
+	}
+
+	/* nothing to wait for? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_next - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	long r;
+
+	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
+		/* nothing to wait for, last_seq is
+		 * already the last emitted fence */
+		return -ENOENT;
+	}
+	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_empty - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	long r;
+
+	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+	if (!seq[ring])
+		return 0;
+
+	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		if (r == -EDEADLK)
+			return -EDEADLK;
+
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
+			ring, r);
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+	dma_fence_get(&fence->base);
+	return fence;
+}
+
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+	struct radeon_fence *tmp = *fence;
+
+	*fence = NULL;
+	if (tmp) {
+		dma_fence_put(&tmp->base);
+	}
+}
+
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring.  Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report slightly wrong fence count here.
+	 */
+	radeon_fence_process(rdev, ring);
+	emitted = rdev->fence_drv[ring].sync_seq[ring]
+		- atomic64_read(&rdev->fence_drv[ring].last_seq);
+	/* to avoid a 32 bit wrap around */
+	if (emitted > 0x10000000) {
+		emitted = 0x10000000;
+	}
+	return (unsigned)emitted;
+}
+
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics).  If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *fdrv;
+
+	if (!fence) {
+		return false;
+	}
+
+	if (fence->ring == dst_ring) {
+		return false;
+	}
+
+	/* we are protected by the ring mutex */
+	fdrv = &fence->rdev->fence_drv[dst_ring];
+	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *dst, *src;
+	unsigned i;
+
+	if (!fence) {
+		return;
+	}
+
+	if (fence->ring == dst_ring) {
+		return;
+	}
+
+	/* we are protected by the ring mutex */
+	src = &fence->rdev->fence_drv[fence->ring];
+	dst = &fence->rdev->fence_drv[dst_ring];
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (i == dst_ring) {
+			continue;
+		}
+		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+	}
+}
+
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+{
+	uint64_t index;
+	int r;
+
+	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
+		rdev->fence_drv[ring].scratch_reg = 0;
+		if (ring != R600_RING_TYPE_UVD_INDEX) {
+			index = R600_WB_EVENT_OFFSET + ring * 4;
+			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+							 index;
+		} else {
+			/* put fence directly behind firmware */
+			index = ALIGN(rdev->uvd_fw->size, 8);
+			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+		}
+	} else {
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			return r;
+		}
+		index = RADEON_WB_SCRATCH_OFFSET +
+			rdev->fence_drv[ring].scratch_reg -
+			rdev->scratch.reg_base;
+		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+	}
+	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
+	rdev->fence_drv[ring].initialized = true;
+	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
+		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+	return 0;
+}
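+
+/*
+ * Illustrative call order during device bringup (a sketch; the real
+ * sequence lives in the asic init paths): radeon_fence_driver_init()
+ * runs once before ring setup, then each asic calls
+ * radeon_fence_driver_start_ring() for every ring it actually has:
+ *
+ *	r = radeon_fence_driver_init(rdev);
+ *	...bring up the rings...
+ *	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ */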
+
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+	int i;
+
+	rdev->fence_drv[ring].scratch_reg = -1;
+	rdev->fence_drv[ring].cpu_addr = NULL;
+	rdev->fence_drv[ring].gpu_addr = 0;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		rdev->fence_drv[ring].sync_seq[i] = 0;
+	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+	rdev->fence_drv[ring].initialized = false;
+	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
+			  radeon_fence_check_lockup);
+	rdev->fence_drv[ring].rdev = rdev;
+}
+
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+	int ring;
+
+	init_waitqueue_head(&rdev->fence_queue);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		radeon_fence_driver_init_ring(rdev, ring);
+	}
+	if (radeon_debugfs_fence_init(rdev)) {
+		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+	int ring, r;
+
+	mutex_lock(&rdev->ring_lock);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		r = radeon_fence_wait_empty(rdev, ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			radeon_fence_driver_force_completion(rdev, ring);
+		}
+		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
+		wake_up_all(&rdev->fence_queue);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		rdev->fence_drv[ring].initialized = false;
+	}
+	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ * @ring: the ring to complete
+ *
+ * In case of GPU reset failure make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
+{
+	if (rdev->fence_drv[ring].initialized) {
+		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
+	}
+}
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i, j;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		radeon_fence_process(rdev, i);
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%016llx\n",
+			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+		seq_printf(m, "Last emitted        0x%016llx\n",
+			   rdev->fence_drv[i].sync_seq[i]);
+
+		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+			if (i != j && rdev->fence_drv[j].initialized)
+				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+					   j, rdev->fence_drv[i].sync_seq[j]);
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger a gpu reset at the next fence wait.
+ */
+static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	down_read(&rdev->exclusive_lock);
+	seq_printf(m, "%d\n", rdev->needs_reset);
+	rdev->needs_reset = true;
+	wake_up_all(&rdev->fence_queue);
+	up_read(&rdev->exclusive_lock);
+
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
+#else
+	return 0;
+#endif
+}
+
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "radeon";
+}
+
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct radeon_fence *fence = to_radeon_fence(f);
+	switch (fence->ring) {
+	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
+	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
+	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
+	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
+	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
+	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
+	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
+	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
+	default: WARN_ON_ONCE(1); return "radeon.unk";
+	}
+}
+
+static inline bool radeon_test_signaled(struct radeon_fence *fence)
+{
+	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+}
+
+struct radeon_wait_cb {
+	struct dma_fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct radeon_wait_cb *wait =
+		container_of(cb, struct radeon_wait_cb, base);
+
+	wake_up_process(wait->task);
+}
+
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
+					     signed long t)
+{
+	struct radeon_fence *fence = to_radeon_fence(f);
+	struct radeon_device *rdev = fence->rdev;
+	struct radeon_wait_cb cb;
+
+	cb.task = current;
+
+	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+		return t;
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * radeon_test_signaled must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (radeon_test_signaled(fence))
+			break;
+
+		if (rdev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+	dma_fence_remove_callback(f, &cb.base);
+
+	return t;
+}
+
+const struct dma_fence_ops radeon_fence_ops = {
+	.get_driver_name = radeon_fence_get_driver_name,
+	.get_timeline_name = radeon_fence_get_timeline_name,
+	.enable_signaling = radeon_fence_enable_signaling,
+	.signaled = radeon_fence_is_signaled,
+	.wait = radeon_fence_default_wait,
+	.release = NULL,
+};
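+
+/*
+ * Because radeon fences embed a struct dma_fence, the generic helpers
+ * also work on them; an illustrative sketch (not part of this file):
+ *
+ *	struct dma_fence *f = &fence->base;
+ *
+ *	if (!dma_fence_is_signaled(f))
+ *		dma_fence_wait_timeout(f, false, msecs_to_jiffies(100));
+ */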
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 0000000..1cef155
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+#include "radeon.h"
+
+/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and the internal GART can be used at the same time; however,
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
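+
+/*
+ * Index math used throughout this file, shown as an illustrative
+ * sketch (values assumed): t indexes GART entries in GPU page units
+ * and p indexes the CPU-page-sized slots in the pages[] array, so
+ * with 4 KiB CPU pages and 4 KiB GPU pages they advance together,
+ * while with 64 KiB CPU pages each p covers 16 values of t:
+ *
+ *	t = offset / RADEON_GPU_PAGE_SIZE;
+ *	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ */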
+
+/*
+ * Common GART table functions.
+ */
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+	void *ptr;
+
+	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
+				   &rdev->gart.table_addr);
+	if (ptr == NULL) {
+		return -ENOMEM;
+	}
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		set_memory_uc((unsigned long)ptr,
+			      rdev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	rdev->gart.ptr = ptr;
+	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
+	return 0;
+}
+
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		set_memory_wb((unsigned long)rdev->gart.ptr,
+			      rdev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
+			    (void *)rdev->gart.ptr,
+			    rdev->gart.table_addr);
+	rdev->gart.ptr = NULL;
+	rdev->gart.table_addr = 0;
+}
+
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     0, NULL, NULL, &rdev->gart.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->gart.robj,
+				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->gart.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->gart.robj);
+	radeon_bo_unreserve(rdev->gart.robj);
+	rdev->gart.table_addr = gpu_addr;
+
+	if (!r) {
+		int i;
+
+		/* We might have dropped some GART table updates while it wasn't
+		 * mapped, restore all entries
+		 */
+		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+
+	return r;
+}
+
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->gart.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
+		rdev->gart.ptr = NULL;
+	}
+}
+
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	radeon_bo_unref(&rdev->gart.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+
+	if (!rdev->gart.ready) {
+		WARN(1, "trying to unbind memory from uninitialized GART!\n");
+		return;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+		if (rdev->gart.pages[p]) {
+			rdev->gart.pages[p] = NULL;
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
+				if (rdev->gart.ptr) {
+					radeon_gart_set_page(rdev, t,
+							     rdev->dummy_page.entry);
+				}
+			}
+		}
+	}
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+}
+
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint32_t flags)
+{
+	unsigned t;
+	unsigned p;
+	uint64_t page_base, page_entry;
+	int i, j;
+
+	if (!rdev->gart.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART!\n");
+		return -EINVAL;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+
+	for (i = 0; i < pages; i++, p++) {
+		rdev->gart.pages[p] = pagelist[i];
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			page_entry = radeon_gart_get_page_entry(page_base, flags);
+			rdev->gart.pages_entry[t] = page_entry;
+			if (rdev->gart.ptr) {
+				radeon_gart_set_page(rdev, t, page_entry);
+			}
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+	return 0;
+}
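+
+/* A minimal caller sketch (illustrative only; gtt_offset, npages, pages
+ * and dma_addrs are hypothetical locals, the flags are the driver's
+ * RADEON_GART_PAGE_* bits):
+ *
+ *	r = radeon_gart_bind(rdev, gtt_offset, npages, pages, dma_addrs,
+ *			     RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ *			     RADEON_GART_PAGE_WRITE);
+ *	...
+ *	radeon_gart_unbind(rdev, gtt_offset, npages);
+ */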
+
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_init(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.pages) {
+		return 0;
+	}
+	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = radeon_dummy_page_init(rdev);
+	if (r)
+		return r;
+	/* Compute table size */
+	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+	/* Allocate pages table */
+	rdev->gart.pages = vzalloc(array_size(sizeof(void *),
+				   rdev->gart.num_cpu_pages));
+	if (rdev->gart.pages == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+						    rdev->gart.num_gpu_pages));
+	if (rdev->gart.pages_entry == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
+	return 0;
+}
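+
+/* Sizing example (illustration, assuming a 64-bit kernel): with a
+ * 512 MiB GTT, 4 KiB CPU pages and 4 KiB GPU pages, num_cpu_pages ==
+ * num_gpu_pages == 512M / 4K == 131072, so the pages table costs
+ * 131072 * 8 == 1 MiB of pointers and pages_entry another 1 MiB.
+ */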
+
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+	if (rdev->gart.ready) {
+		/* unbind pages */
+		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+	}
+	rdev->gart.ready = false;
+	vfree(rdev->gart.pages);
+	vfree(rdev->gart.pages_entry);
+	rdev->gart.pages = NULL;
+	rdev->gart.pages_entry = NULL;
+
+	radeon_dummy_page_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 0000000..27d8e7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
+
+	if (robj) {
+		radeon_mn_unregister(robj);
+		radeon_bo_unref(&robj);
+	}
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
+				int alignment, int initial_domain,
+				u32 flags, bool kernel,
+				struct drm_gem_object **obj)
+{
+	struct radeon_bo *robj;
+	unsigned long max_size;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE) {
+		alignment = PAGE_SIZE;
+	}
+
+	/* Maximum bo size is the unpinned gtt size since we use the gtt to
+	 * handle vram to system pool migrations.
+	 */
+	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
+	if (size > max_size) {
+		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
+			  size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+			     flags, NULL, NULL, &robj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		}
+		return r;
+	}
+	*obj = &robj->gem_base;
+	robj->pid = task_pid_nr(current);
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&robj->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
+	return 0;
+}
+
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
+			  uint32_t rdomain, uint32_t wdomain)
+{
+	struct radeon_bo *robj;
+	uint32_t domain;
+	long r;
+
+	/* FIXME: reimplement */
+	robj = gem_to_radeon_bo(gobj);
+	/* work out where to validate the buffer to */
+	domain = wdomain;
+	if (!domain) {
+		domain = rdomain;
+	}
+	if (!domain) {
+		/* Do nothing */
+		pr_warn("Set domain without domain!\n");
+		return 0;
+	}
+	if (domain == RADEON_GEM_DOMAIN_CPU) {
+		/* Asking for cpu access; wait for the object to go idle */
+		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+		if (!r)
+			r = -EBUSY;
+
+		if (r < 0 && r != -EINTR) {
+			pr_err("Failed to wait for object: %li\n", r);
+			return r;
+		}
+	}
+	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+	INIT_LIST_HEAD(&rdev->gem.objects);
+	return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+	radeon_bo_force_delete(rdev);
+}
+
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and
+ * open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
+		return 0;
+	}
+
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		return r;
+	}
+
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (!bo_va) {
+		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+	} else {
+		++bo_va->ref_count;
+	}
+	radeon_bo_unreserve(rbo);
+
+	return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+			     struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
+		return;
+	}
+
+	r = radeon_bo_reserve(rbo, true);
+	if (r) {
+		dev_err(rdev->dev, "leaking bo va because "
+			"we failed to reserve the bo (%d)\n", r);
+		return;
+	}
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (bo_va) {
+		if (--bo_va->ref_count == 0) {
+			radeon_vm_bo_rmv(rdev, bo_va);
+		}
+	}
+	radeon_bo_unreserve(rbo);
+}
+
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_info *args = data;
+	struct ttm_mem_type_manager *man;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+
+	args->vram_size = (u64)man->size << PAGE_SHIFT;
+	args->vram_visible = rdev->mc.visible_vram_size;
+	args->vram_visible -= rdev->vram_pin_size;
+	args->gart_size = rdev->mc.gtt_size;
+	args->gart_size -= rdev->gart_pin_size;
+
+	return 0;
+}
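+
+/* Illustrative userspace query via libdrm (a sketch, not part of this
+ * file; assumes a DRM fd opened on a radeon node):
+ *
+ *	struct drm_radeon_gem_info info = {};
+ *
+ *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
+ *				&info, sizeof(info)) == 0)
+ *		printf("gart %lluMB vram %lluMB\n",
+ *		       info.gart_size >> 20, info.vram_size >> 20);
+ */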
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_create *args = data;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	down_read(&rdev->exclusive_lock);
+	/* create a gem object to contain this object in */
+	args->size = roundup(args->size, PAGE_SIZE);
+	r = radeon_gem_object_create(rdev, args->size, args->alignment,
+				     args->initial_domain, args->flags,
+				     false, &gobj);
+	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put_unlocked(gobj);
+	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
+	return 0;
+}
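+
+/* The matching userspace call, as a hedged sketch (assumes libdrm; size,
+ * alignment and domain are example values):
+ *
+ *	struct drm_radeon_gem_create req = {
+ *		.size = 1024 * 1024,
+ *		.alignment = 4096,
+ *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
+ *	};
+ *
+ *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
+ *				&req, sizeof(req)) == 0)
+ *		use(req.handle);	// handle now names the new BO
+ */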
+
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_userptr *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *bo;
+	uint32_t handle;
+	int r;
+
+	if (offset_in_page(args->addr | args->size))
+		return -EINVAL;
+
+	/* reject unknown flag values */
+	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
+	    RADEON_GEM_USERPTR_REGISTER))
+		return -EINVAL;
+
+	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
+		/* readonly pages not tested on older hardware */
+		if (rdev->family < CHIP_R600)
+			return -EINVAL;
+
+	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
+		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
+
+		/* if we want to write to it we must require anonymous
+		   memory and install an MMU notifier */
+		return -EACCES;
+	}
+
+	down_read(&rdev->exclusive_lock);
+
+	/* create a gem object to contain this object in */
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_CPU, 0,
+				     false, &gobj);
+	if (r)
+		goto handle_lockup;
+
+	bo = gem_to_radeon_bo(gobj);
+	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	if (r)
+		goto release_object;
+
+	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
+		r = radeon_mn_register(bo, args->addr);
+		if (r)
+			goto release_object;
+	}
+
+	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
+		down_read(&current->mm->mmap_sem);
+		r = radeon_bo_reserve(bo, true);
+		if (r) {
+			up_read(&current->mm->mmap_sem);
+			goto release_object;
+		}
+
+		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		radeon_bo_unreserve(bo);
+		up_read(&current->mm->mmap_sem);
+		if (r)
+			goto release_object;
+	}
+
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put_unlocked(gobj);
+	if (r)
+		goto handle_lockup;
+
+	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
+	return 0;
+
+release_object:
+	drm_gem_object_put_unlocked(gobj);
+
+handle_lockup:
+	up_read(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(rdev, r);
+
+	return r;
+}
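+
+/* Flag rules above, restated for illustration: a writable mapping must
+ * pass both RADEON_GEM_USERPTR_ANONONLY and RADEON_GEM_USERPTR_REGISTER,
+ * e.g.
+ *
+ *	args.flags = RADEON_GEM_USERPTR_ANONONLY |
+ *		     RADEON_GEM_USERPTR_REGISTER |
+ *		     RADEON_GEM_USERPTR_VALIDATE;
+ *
+ * while RADEON_GEM_USERPTR_READONLY alone is accepted on r600 and newer.
+ */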
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	/* transition the BO to a domain -
+	 * just validate the BO into a certain domain */
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_set_domain *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	/* for now if someone requests domain CPU -
+	 * just make sure the buffer is finished with */
+	down_read(&rdev->exclusive_lock);
+
+	/* just do a BO wait for now */
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL) {
+		up_read(&rdev->exclusive_lock);
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+	drm_gem_object_put_unlocked(gobj);
+	up_read(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
+	return r;
+}
+
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+
+	gobj = drm_gem_object_lookup(filp, handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		drm_gem_object_put_unlocked(gobj);
+		return -EPERM;
+	}
+	*offset_p = radeon_bo_mmap_offset(robj);
+	drm_gem_object_put_unlocked(gobj);
+	return 0;
+}
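+
+/* Userspace then maps the BO through the returned fake offset (sketch,
+ * assuming the generic dumb-buffer flow; handle and size are
+ * hypothetical):
+ *
+ *	struct drm_mode_map_dumb map = { .handle = handle };
+ *
+ *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, map.offset);
+ */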
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_mmap *args = data;
+
+	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_busy *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+	uint32_t cur_placement = 0;
+
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+	if (r == 0)
+		r = -EBUSY;
+	else
+		r = 0;
+
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	args->domain = radeon_mem_type_to_domain(cur_placement);
+	drm_gem_object_put_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_wait_idle *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r = 0;
+	uint32_t cur_placement = 0;
+	long ret;
+
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+	if (ret == 0)
+		r = -EBUSY;
+	else if (ret < 0)
+		r = ret;
+
+	/* Flush HDP cache via MMIO if necessary */
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	if (rdev->asic->mmio_hdp_flush &&
+	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+		robj->rdev->asic->mmio_hdp_flush(rdev);
+	drm_gem_object_put_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_set_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r = 0;
+
+	DRM_DEBUG("%d\n", args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	drm_gem_object_put_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_get_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *rbo;
+	int r = 0;
+
+	DRM_DEBUG("\n");
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		goto out;
+	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+	radeon_bo_unreserve(rbo);
+out:
+	drm_gem_object_put_unlocked(gobj);
+	return r;
+}
+
+/**
+ * radeon_gem_va_update_vm - update the bo_va in its VM
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+				    struct radeon_bo_va *bo_va)
+{
+	struct ttm_validate_buffer tv, *entry;
+	struct radeon_bo_list *vm_bos;
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	unsigned domain;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+
+	tv.bo = &bo_va->bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
+	if (!vm_bos)
+		return;
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r)
+		goto error_free;
+
+	list_for_each_entry(entry, &list, head) {
+		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == RADEON_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
+	mutex_lock(&bo_va->vm->mutex);
+	r = radeon_vm_clear_freed(rdev, bo_va->vm);
+	if (r)
+		goto error_unlock;
+
+	if (bo_va->it.start)
+		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+	mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+	ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+	kvfree(vm_bos);
+
+	if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_va *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_fpriv *fpriv = filp->driver_priv;
+	struct radeon_bo *rbo;
+	struct radeon_bo_va *bo_va;
+	u32 invalid_flags;
+	int r = 0;
+
+	if (!rdev->vm_manager.enabled) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOTTY;
+	}
+
+	/* !! DON'T REMOVE !!
+	 * We don't support vm_id yet. To be sure we don't have broken
+	 * userspace, reject anyone trying to use a non-zero value; moving
+	 * forward we can then use those fields without breaking existing
+	 * userspace.
+	 */
+	if (args->vm_id) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	if (args->offset < RADEON_VA_RESERVED_SIZE) {
+		dev_err(&dev->pdev->dev,
+			"offset 0x%lX is in reserved area 0x%X\n",
+			(unsigned long)args->offset,
+			RADEON_VA_RESERVED_SIZE);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	/* don't remove: we need to force userspace to set the snooped flag,
+	 * otherwise we will end up with broken userspace and won't be able
+	 * to enable this feature without adding a new interface
+	 */
+	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+	if ((args->flags & invalid_flags)) {
+		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+			args->flags, invalid_flags);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+	case RADEON_VA_UNMAP:
+		break;
+	default:
+		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+			args->operation);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOENT;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_put_unlocked(gobj);
+		return r;
+	}
+	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+	if (!bo_va) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		radeon_bo_unreserve(rbo);
+		drm_gem_object_put_unlocked(gobj);
+		return -ENOENT;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+		if (bo_va->it.start) {
+			args->operation = RADEON_VA_RESULT_VA_EXIST;
+			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
+			radeon_bo_unreserve(rbo);
+			goto out;
+		}
+		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
+		break;
+	case RADEON_VA_UNMAP:
+		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
+		break;
+	default:
+		break;
+	}
+	if (!r)
+		radeon_gem_va_update_vm(rdev, bo_va);
+	args->operation = RADEON_VA_RESULT_OK;
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+	}
+out:
+	drm_gem_object_put_unlocked(gobj);
+	return r;
+}
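+
+/* A valid map request from userspace (sketch; handle and address are
+ * hypothetical, and note that PAGE_VALID/PAGE_SYSTEM are rejected above):
+ *
+ *	struct drm_radeon_gem_va va = {
+ *		.handle = handle,
+ *		.operation = RADEON_VA_MAP,
+ *		.flags = RADEON_VM_PAGE_READABLE |
+ *			 RADEON_VM_PAGE_WRITEABLE |
+ *			 RADEON_VM_PAGE_SNOOPED,
+ *		.offset = address,
+ *	};
+ *
+ *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
+ */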
+
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	struct drm_radeon_gem_op *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = -EPERM;
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+		goto out;
+
+	r = radeon_bo_reserve(robj, false);
+	if (unlikely(r))
+		goto out;
+
+	switch (args->op) {
+	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
+		args->value = robj->initial_domain;
+		break;
+	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
+		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
+						      RADEON_GEM_DOMAIN_GTT |
+						      RADEON_GEM_DOMAIN_CPU);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	radeon_bo_unreserve(robj);
+out:
+	drm_gem_object_put_unlocked(gobj);
+	return r;
+}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	args->pitch = radeon_align_pitch(rdev, args->width,
+					 DIV_ROUND_UP(args->bpp, 8), 0);
+	args->size = args->pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_VRAM, 0,
+				     false, &gobj);
+	if (r)
+		return -ENOMEM;
+
+	r = drm_gem_handle_create(file_priv, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put_unlocked(gobj);
+	if (r) {
+		return r;
+	}
+	args->handle = handle;
+	return 0;
+}
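+
+/* Size math above, worked through for illustration: a 1920x1080 buffer
+ * at bpp == 32 gives cpp == 4, a pitch of at least 1920 * 4 == 7680
+ * bytes (possibly padded further by radeon_align_pitch()), and size ==
+ * pitch * 1080 rounded up to a page, i.e. at least 8294400 bytes.
+ */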
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_bo *rbo;
+	unsigned i = 0;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_for_each_entry(rbo, &rdev->gem.objects, list) {
+		unsigned domain;
+		const char *placement;
+
+		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+		switch (domain) {
+		case RADEON_GEM_DOMAIN_VRAM:
+			placement = "VRAM";
+			break;
+		case RADEON_GEM_DOMAIN_GTT:
+			placement = " GTT";
+			break;
+		case RADEON_GEM_DOMAIN_CPU:
+		default:
+			placement = " CPU";
+			break;
+		}
+		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+			   placement, (unsigned long)rbo->pid);
+		i++;
+	}
+	mutex_unlock(&rdev->gem.mutex);
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 0000000..29f7817
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,1156 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+				   struct i2c_msg *msgs, int num);
+extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
+
+/**
+ * radeon_ddc_probe - sanity check the DDC bus on a connector
+ *
+ * @radeon_connector: connector to probe
+ * @use_aux: use the DP AUX channel instead of the i2c bus
+ *
+ * Reads the first bytes of the EDID over DDC and checks for a valid
+ * EDID header.  Returns true if a display appears to be attached.
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+{
+	u8 out = 0x0;
+	u8 buf[8];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = DDC_ADDR,
+			.flags = 0,
+			.len = 1,
+			.buf = &out,
+		},
+		{
+			.addr = DDC_ADDR,
+			.flags = I2C_M_RD,
+			.len = 8,
+			.buf = buf,
+		}
+	};
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if (use_aux) {
+		ret = i2c_transfer(&radeon_connector->ddc_bus->aux.ddc, msgs, 2);
+	} else {
+		ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+	}
+
+	if (ret != 2)
+		/* Couldn't find an accessible DDC on this connector */
+		return false;
+	/* Probe also for valid EDID header
+	 * EDID header starts with:
+	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+	 * Only the first 6 bytes must be valid as
+	 * drm_edid_block_valid() can fix the last 2 bytes */
+	if (drm_edid_header_is_valid(buf) < 6) {
+		/* Couldn't find an accessible EDID on this
+		 * connector */
+		return false;
+	}
+	return true;
+}
+
+/* bit banging i2c */
+
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	mutex_lock(&i2c->mutex);
+
+	/* RV410 appears to have a bug where the hw i2c in reset
+	 * holds the i2c port in a bad state - switch hw i2c away before
+	 * doing DDC - do this for all r200s/r300s/r400s for safety's sake
+	 */
+	if (rec->hw_capable) {
+		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+			u32 reg;
+
+			if (rdev->family >= CHIP_RV350)
+				reg = RADEON_GPIO_MONID;
+			else if ((rdev->family == CHIP_R300) ||
+				 (rdev->family == CHIP_R350))
+				reg = RADEON_GPIO_DVI_DDC;
+			else
+				reg = RADEON_GPIO_CRT2_DDC;
+
+			mutex_lock(&rdev->dc_hw_i2c_mutex);
+			if (rec->a_clk_reg == reg) {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+			} else {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+			}
+			mutex_unlock(&rdev->dc_hw_i2c_mutex);
+		}
+	}
+
+	/* switch the pads to ddc mode */
+	if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+		temp = RREG32(rec->mask_clk_reg);
+		temp &= ~(1 << 16);
+		WREG32(rec->mask_clk_reg, temp);
+	}
+
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
+
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* unmask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	mutex_unlock(&i2c->mutex);
+}
+
+static int get_clock(void *i2c_priv)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
+
+	return (val != 0);
+}
+
+
+static int get_data(void *i2c_priv)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
+	return (val != 0);
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
+}
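+
+/* The four callbacks above emulate an open-drain bus: a line is never
+ * driven high, it is either pulled low (output enable bit set while the
+ * A latch already holds 0, see pre_xfer()) or released to float high via
+ * the bus pull-ups (output enable bit cleared, pin becomes an input).
+ * That is why set_clock()/set_data() only touch the en_* registers.
+ */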
+
+/* hw i2c */
+
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+	u32 sclk = rdev->pm.current_sclk;
+	u32 prescale = 0;
+	u32 nm;
+	u8 n, m, loop;
+	int i2c_clock;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+		i2c_clock = 60;
+		nm = (sclk * 10) / (i2c_clock * 4);
+		for (loop = 1; loop < 255; loop++) {
+			if ((nm / loop) < loop)
+				break;
+		}
+		n = loop - 1;
+		m = loop - 2;
+		prescale = m | (n << 8);
+		break;
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* todo */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		i2c_clock = 50;
+		if (rdev->family == CHIP_R520)
+			prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+		else
+			prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* todo */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* todo */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* todo */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		break;
+	}
+	return prescale;
+}
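+
+/* Worked example for the r1xx-r3xx branch (illustrative; assumes sclk is
+ * reported in 10 kHz units, e.g. 20000 for a 200 MHz engine clock):
+ * nm = (20000 * 10) / (60 * 4) = 833; the loop stops at loop = 29 since
+ * 833 / 29 = 28 < 29, giving n = 28, m = 27 and prescale = (28 << 8) | 27.
+ */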
+
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct i2c_msg *p;
+	int i, j, k, ret = num;
+	u32 prescale;
+	u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+	u32 tmp, reg;
+
+	mutex_lock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	mutex_lock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+	       RADEON_I2C_DRIVE_EN |
+	       RADEON_I2C_START |
+	       RADEON_I2C_STOP |
+	       RADEON_I2C_GO);
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	}
+
+	if (rec->mm_i2c) {
+		i2c_cntl_0 = RADEON_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_I2C_CNTL_1;
+		i2c_data = RADEON_I2C_DATA;
+	} else {
+		i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+		i2c_data = RADEON_DVI_I2C_DATA;
+
+		switch (rdev->family) {
+		case CHIP_R100:
+		case CHIP_RV100:
+		case CHIP_RS100:
+		case CHIP_RV200:
+		case CHIP_RS200:
+		case CHIP_RS300:
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				/* no gpio select bit */
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R200:
+			/* only bit 4 on r200 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV250:
+		case CHIP_RV280:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_CRT2_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R300:
+		case CHIP_R350:
+			/* only bit 4 on r300/r350 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV350:
+		case CHIP_RV380:
+		case CHIP_R420:
+		case CHIP_R423:
+		case CHIP_RV410:
+		case CHIP_RS400:
+		case CHIP_RS480:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		default:
+			DRM_ERROR("unsupported asic\n");
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+				    RADEON_I2C_NACK |
+				    RADEON_I2C_HALT |
+				    RADEON_I2C_SOFT_RST));
+		WREG32(i2c_data, (p->addr << 1) & 0xff);
+		WREG32(i2c_data, 0);
+		WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+				    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+				    RADEON_I2C_EN |
+				    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+		WREG32(i2c_cntl_0, reg);
+		for (k = 0; k < 32; k++) {
+			udelay(10);
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_GO)
+				continue;
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		for (j = 0; j < p->len; j++) {
+			if (p->flags & I2C_M_RD) {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				p->buf[j] = RREG32(i2c_data) & 0xff;
+			} else {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, (p->addr << 1) & 0xff);
+				WREG32(i2c_data, p->buf[j]);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+			}
+		}
+	}
+
+done:
+	WREG32(i2c_cntl_0, 0);
+	WREG32(i2c_cntl_1, 0);
+	WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+			    RADEON_I2C_NACK |
+			    RADEON_I2C_HALT |
+			    RADEON_I2C_SOFT_RST));
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+	}
+
+	mutex_unlock(&rdev->pm.mutex);
+	mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct i2c_msg *p;
+	int i, j, remaining, current_count, buffer_offset, ret = num;
+	u32 prescale;
+	u32 tmp, reg;
+	u32 saved1, saved2;
+
+	mutex_lock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	mutex_lock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	/* clear gpio mask bits */
+	tmp = RREG32(rec->mask_clk_reg);
+	tmp &= ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, tmp);
+	tmp = RREG32(rec->mask_clk_reg);
+
+	tmp = RREG32(rec->mask_data_reg);
+	tmp &= ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, tmp);
+	tmp = RREG32(rec->mask_data_reg);
+
+	/* clear pin values */
+	tmp = RREG32(rec->a_clk_reg);
+	tmp &= ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, tmp);
+	tmp = RREG32(rec->a_clk_reg);
+
+	tmp = RREG32(rec->a_data_reg);
+	tmp &= ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, tmp);
+	tmp = RREG32(rec->a_data_reg);
+
+	/* set the pins to input */
+	tmp = RREG32(rec->en_clk_reg);
+	tmp &= ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, tmp);
+	tmp = RREG32(rec->en_clk_reg);
+
+	tmp = RREG32(rec->en_data_reg);
+	tmp &= ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, tmp);
+	tmp = RREG32(rec->en_data_reg);
+
+	/* flag the hw i2c engine busy and save the state we clobber */
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+	saved2 = RREG32(0x494);
+	WREG32(0x494, saved2 | 0x1);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+	for (i = 0; i < 50; i++) {
+		udelay(1);
+		if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+			break;
+	}
+	if (i == 50) {
+		DRM_ERROR("failed to get i2c bus\n");
+		ret = -EBUSY;
+		goto done;
+	}
+
+	reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+	switch (rec->mask_clk_reg) {
+	case AVIVO_DC_GPIO_DDC1_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+		break;
+	case AVIVO_DC_GPIO_DDC2_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+		break;
+	case AVIVO_DC_GPIO_DDC3_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+		break;
+	default:
+		DRM_ERROR("gpio not supported with hw i2c\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+					      AVIVO_DC_I2C_NACK |
+					      AVIVO_DC_I2C_HALT));
+		WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+		udelay(1);
+		WREG32(AVIVO_DC_I2C_RESET, 0);
+
+		WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+		WREG32(AVIVO_DC_I2C_DATA, 0);
+
+		WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+		WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+					       AVIVO_DC_I2C_DATA_COUNT(1) |
+					       (prescale << 16)));
+		WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+		WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+		for (j = 0; j < 200; j++) {
+			udelay(50);
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_GO)
+				continue;
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		if (p->flags & I2C_M_RD) {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				for (j = 0; j < current_count; j++)
+					p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		} else {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+				for (j = 0; j < current_count; j++)
+					WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		}
+	}
+
+done:
+	WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+				      AVIVO_DC_I2C_NACK |
+				      AVIVO_DC_I2C_HALT));
+	WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+	udelay(1);
+	WREG32(AVIVO_DC_I2C_RESET, 0);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+	WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+	WREG32(0x494, saved2);
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+	mutex_unlock(&rdev->pm.mutex);
+	mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	int ret = 0;
+
+	mutex_lock(&i2c->mutex);
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		if (rec->mm_i2c)
+			ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+		else
+			ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* XXX fill in hw i2c implementation */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		ret = -EIO;
+		break;
+	}
+
+	mutex_unlock(&i2c->mutex);
+
+	return ret;
+}
+
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+	.master_xfer = radeon_hw_i2c_xfer,
+	.functionality = radeon_hw_i2c_func,
+};
+
+static const struct i2c_algorithm radeon_atom_i2c_algo = {
+	.master_xfer = radeon_atom_hw_i2c_xfer,
+	.functionality = radeon_atom_hw_i2c_func,
+};
+
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+					  struct radeon_i2c_bus_rec *rec,
+					  const char *name)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	mutex_init(&i2c->mutex);
+	if (rec->mm_i2c ||
+	    (rec->hw_capable &&
+	     radeon_hw_i2c &&
+	     ((rdev->family <= CHIP_RS480) ||
+	      ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+		/* set the radeon hw i2c adapter */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c hw bus %s", name);
+		i2c->adapter.algo = &radeon_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret)
+			goto out_free;
+	} else if (rec->hw_capable &&
+		   radeon_hw_i2c &&
+		   ASIC_IS_DCE3(rdev)) {
+		/* hw i2c using atom */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c hw bus %s", name);
+		i2c->adapter.algo = &radeon_atom_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret)
+			goto out_free;
+	} else {
+		/* set the radeon bit adapter */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c bit bus %s", name);
+		i2c->adapter.algo_data = &i2c->bit;
+		i2c->bit.pre_xfer = pre_xfer;
+		i2c->bit.post_xfer = post_xfer;
+		i2c->bit.setsda = set_data;
+		i2c->bit.setscl = set_clock;
+		i2c->bit.getsda = get_data;
+		i2c->bit.getscl = get_clock;
+		i2c->bit.udelay = 10;
+		i2c->bit.timeout = usecs_to_jiffies(2200);	/* from VESA */
+		i2c->bit.data = i2c;
+		ret = i2c_bit_add_bus(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register bit i2c %s\n", name);
+			goto out_free;
+		}
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	WARN_ON(i2c->has_aux);
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+	if (radeon_hw_i2c)
+		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_i2c_init(rdev);
+	else
+		radeon_combios_i2c_init(rdev);
+}
+
+/* remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i]) {
+			radeon_i2c_destroy(rdev->i2c_bus[i]);
+			rdev->i2c_bus[i] = NULL;
+		}
+	}
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+		    struct radeon_i2c_bus_rec *rec,
+		    const char *name)
+{
+	struct drm_device *dev = rdev->ddev;
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (!rdev->i2c_bus[i]) {
+			rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+			return;
+		}
+	}
+}
+
+/* looks up bus based on id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+					  struct radeon_i2c_bus_rec *i2c_bus)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i] &&
+		    (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+			return rdev->i2c_bus[i];
+		}
+	}
+	return NULL;
+}
+
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
+			  addr, *val);
+	}
+}
+
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 val)
+{
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
+
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.ddc_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
new file mode 100644
index 0000000..92ce0e5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU-accessible memory where
+ * commands are stored.  You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them.  Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
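+
+/* Typical life cycle of an IB, as a sketch for kernel-internal callers
+ * (ring index and size are illustrative):
+ *
+ *	struct radeon_ib ib;
+ *
+ *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
+ *	// ... write packets into ib.ptr[], set ib.length_dw ...
+ *	r = radeon_ib_schedule(rdev, &ib, NULL, true);
+ *	r = radeon_fence_wait(ib.fence, false);
+ *	radeon_ib_free(rdev, &ib);
+ */
+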
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @vm: vm this IB is associated with, or NULL
+ * @size: requested IB size
+ *
+ * Request an IB (all asics).  IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size)
+{
+	int r;
+
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+	if (r) {
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		return r;
+	}
+
+	radeon_sync_create(&ib->sync);
+
+	ib->ring = ring;
+	ib->fence = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->vm = vm;
+	if (vm) {
+		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+		 * space and soffset is the offset inside the pool bo
+		 */
+		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+	} else {
+		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	}
+	ib->is_const_ib = false;
+
+	return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	radeon_sync_free(rdev, &ib->sync, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine).  Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed.  To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE.  If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib, bool hdp_flush)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	int r = 0;
+
+	if (!ib->length_dw || !ring->ready) {
+		/* nothing to schedule: the IB is empty or the ring isn't ready */
+		dev_err(rdev->dev, "couldn't schedule ib\n");
+		return -EINVAL;
+	}
+
+	/* 64 dwords should be enough for fence too */
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+	if (r) {
+		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+		return r;
+	}
+
+	/* grab a vm id if necessary */
+	if (ib->vm) {
+		struct radeon_fence *vm_id_fence;
+		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+		radeon_sync_fence(&ib->sync, vm_id_fence);
+	}
+
+	/* sync with other rings */
+	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	if (ib->vm)
+		radeon_vm_flush(rdev, ib->vm, ib->ring,
+				ib->sync.last_vm_update);
+
+	if (const_ib) {
+		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+		radeon_sync_free(rdev, &const_ib->sync, NULL);
+	}
+	radeon_ring_ib_execute(rdev, ib->ring, ib);
+	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	if (const_ib) {
+		const_ib->fence = radeon_fence_ref(ib->fence);
+	}
+
+	if (ib->vm)
+		radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
+	return 0;
+}
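+
+/*
+ * Illustrative sketch: on SI the CS code hands both IBs to the scheduler
+ * in one call so the CE IB is placed on the ring first (the names here
+ * are placeholders for the parser state):
+ *
+ *	r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
+ *
+ * On asics without a CE, const_ib is simply NULL.
+ */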
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->ib_pool_ready) {
+		return 0;
+	}
+
+	if (rdev->family >= CHIP_BONAIRE) {
+		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+					      RADEON_IB_POOL_SIZE*64*1024,
+					      RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_GTT_WC);
+	} else {
+		/* Before CIK, it's better to stick to cacheable GTT due
+		 * to the command stream checking
+		 */
+		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+					      RADEON_IB_POOL_SIZE*64*1024,
+					      RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT, 0);
+	}
+	if (r) {
+		return r;
+	}
+
+	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+	if (r) {
+		return r;
+	}
+
+	rdev->ib_pool_ready = true;
+	if (radeon_debugfs_sa_init(rdev)) {
+		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+	}
+	return 0;
+}
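+
+/*
+ * Sizing note (assuming RADEON_IB_POOL_SIZE is 16): the pool created
+ * above spans RADEON_IB_POOL_SIZE * 64 KiB = 1 MiB of GTT, which
+ * radeon_ib_get() then carves up on demand with 256 byte alignment.
+ */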
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+		rdev->ib_pool_ready = false;
+	}
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ring = &rdev->ring[i];
+
+		if (!ring->ready)
+			continue;
+
+		r = radeon_ib_test(rdev, i, ring);
+		if (r) {
+			radeon_fence_driver_force_completion(rdev, i);
+			ring->ready = false;
+			rdev->needs_reset = false;
+
+			if (i == RADEON_RING_TYPE_GFX_INDEX) {
+				/* oh, oh, that's really bad */
+				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+				rdev->accel_working = false;
+				return r;
+
+			} else {
+				/* still not good, but we can live with it */
+				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+	return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 0000000..afaf10d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#include <linux/pm_runtime.h>
+
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @irq: interrupt number
+ * @arg: pointer to the drm device
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct radeon_device *rdev = dev->dev_private;
+	irqreturn_t ret;
+
+	ret = radeon_irq_process(rdev);
+	if (ret == IRQ_HANDLED)
+		pm_runtime_mark_last_busy(dev->dev);
+	return ret;
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt.  It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  hotplug_work.work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	/* we can race here at startup, some boards seem to trigger
+	 * hotplug irqs when they shouldn't. */
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	mutex_lock(&mode_config->mutex);
+	list_for_each_entry(connector, &mode_config->connector_list, head)
+		radeon_connector_hotplug(connector);
+	mutex_unlock(&mode_config->mutex);
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+static void radeon_dp_work_func(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  dp_work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	/* this should take a mutex */
+	list_for_each_entry(connector, &mode_config->connector_list, head)
+		radeon_connector_hotplug(connector);
+}
+
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	rdev->irq.dpm_thermal = false;
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	/* Clear bits */
+	radeon_irq_process(rdev);
+}
+
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles stuff to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_AVIVO(rdev))
+		dev->max_vblank_count = 0x00ffffff;
+	else
+		dev->max_vblank_count = 0x001fffff;
+
+	return 0;
+}
+
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	if (rdev == NULL) {
+		return;
+	}
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	rdev->irq.dpm_thermal = false;
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+	/* RV370/RV380 was first asic with MSI support */
+	if (rdev->family < CHIP_RV380)
+		return false;
+
+	/* MSIs don't work on AGP */
+	if (rdev->flags & RADEON_IS_AGP)
+		return false;
+
+	/*
+	 * Older chips have a HW limitation, they can only generate 40 bits
+	 * of address for "64-bit" MSIs which breaks on some platforms, notably
+	 * IBM POWER servers, so we limit them
+	 */
+	if (rdev->family < CHIP_BONAIRE) {
+		dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
+		rdev->pdev->no_64bit_msi = 1;
+	}
+
+	/* force MSI on */
+	if (radeon_msi == 1)
+		return true;
+	else if (radeon_msi == 0)
+		return false;
+
+	/* Quirks */
+	/* HP RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x103c) &&
+	    (rdev->pdev->subsystem_device == 0x30c2))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+	    (rdev->pdev->subsystem_device == 0x01fc))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+	    (rdev->pdev->subsystem_device == 0x01fd))
+		return true;
+
+	/* Gateway RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x107b) &&
+	    (rdev->pdev->subsystem_device == 0x0185))
+		return true;
+
+	/* try and enable MSIs by default on all RS690s */
+	if (rdev->family == CHIP_RS690)
+		return true;
+
+	/* RV515 seems to have MSI issues where it loses
+	 * MSI rearms occasionally.  This leads to lockups and freezes.
+	 * Disable it by default.
+	 */
+	if (rdev->family == CHIP_RV515)
+		return false;
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* APUs work fine with MSIs */
+		if (rdev->family >= CHIP_PALM)
+			return true;
+		/* lots of IGPs have problems with MSIs */
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+	int r = 0;
+
+	spin_lock_init(&rdev->irq.lock);
+
+	/* Disable vblank irqs aggressively for power-saving */
+	rdev->ddev->vblank_disable_immediate = true;
+
+	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+	if (r) {
+		return r;
+	}
+
+	/* enable msi */
+	rdev->msi_enabled = 0;
+
+	if (radeon_msi_ok(rdev)) {
+		int ret = pci_enable_msi(rdev->pdev);
+		if (!ret) {
+			rdev->msi_enabled = 1;
+			dev_info(rdev->dev, "radeon: using MSI.\n");
+		}
+	}
+
+	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
+	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
+	rdev->irq.installed = true;
+	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
+	if (r) {
+		rdev->irq.installed = false;
+		flush_delayed_work(&rdev->hotplug_work);
+		return r;
+	}
+
+	DRM_INFO("radeon: irq initialized.\n");
+	return 0;
+}
+
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+	if (rdev->irq.installed) {
+		drm_irq_uninstall(rdev->ddev);
+		rdev->irq.installed = false;
+		if (rdev->msi_enabled)
+			pci_disable_msi(rdev->pdev);
+		flush_delayed_work(&rdev->hotplug_work);
+	}
+}
+
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_sw_irq_get_delayed - take a software interrupt reference
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Takes a reference on the software interrupt for a specific ring (all
+ * asics) without touching the hardware.  Returns true if this was the
+ * first reference, in which case the caller is still responsible for
+ * enabling the interrupt (e.g. via a later radeon_irq_set()).
+ */
+bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
+{
+	return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
+}
+
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
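+
+/*
+ * Illustrative sketch (not driver code): the get()/put() calls above are
+ * reference counted, so callers bracket the window in which they need
+ * fence interrupts delivered, e.g. around a fence wait:
+ *
+ *	radeon_irq_kms_sw_irq_get(rdev, ring);
+ *	... block until the fence sequence number signals ...
+ *	radeon_irq_kms_sw_irq_put(rdev, ring);
+ *
+ * Only the 0 -> 1 and 1 -> 0 transitions of the counter actually program
+ * the hardware via radeon_irq_set().
+ */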
+
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = true;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_set_irq_n_enabled - helper for updating interrupt enable registers
+ *
+ * @rdev: radeon device pointer
+ * @reg: the register to write to enable/disable interrupts
+ * @mask: the mask that enables the interrupts
+ * @enable: whether to enable or disable the interrupt register
+ * @name: the name of the interrupt register to print to the kernel log
+ * @n: the number of the interrupt register to print to the kernel log
+ *
+ * Helper for updating the enable state of interrupt registers. Checks whether
+ * or not the interrupt matches the enable state we want. If it doesn't,
+ * update it and print a debugging message to the kernel log indicating the
+ * new state of the interrupt register.
+ *
+ * Used for updating sequences of interrupt registers like HPD1, HPD2, etc.
+ */
+void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
+				      u32 reg, u32 mask,
+				      bool enable, const char *name, unsigned n)
+{
+	u32 tmp = RREG32(reg);
+
+	/* Interrupt state didn't change */
+	if (!!(tmp & mask) == enable)
+		return;
+
+	if (enable) {
+		DRM_DEBUG("%s%d interrupts enabled\n", name, n);
+		WREG32(reg, tmp | mask);
+	} else {
+		DRM_DEBUG("%s%d interrupts disabled\n", name, n);
+		WREG32(reg, tmp & ~mask);
+	}
+}
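+
+/*
+ * Illustrative sketch (register and mask names are placeholders): asic
+ * code typically calls the helper above in a loop over numbered sources,
+ * e.g. the HPD pins:
+ *
+ *	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+ *		radeon_irq_kms_set_irq_n_enabled(rdev, HPD_INT_CONTROL(i),
+ *						 HPD_INT_EN, rdev->irq.hpd[i],
+ *						 "HPD", i + 1);
+ */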
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 0000000..dec1e08
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,923 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+#include "radeon_asic.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx(void);
+#else
+static inline bool radeon_has_atpx(void) { return false; }
+#endif
+
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ */
+void radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return;
+
+	if (rdev->rmmio == NULL)
+		goto done_free;
+
+	if (radeon_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
+
+	radeon_acpi_fini(rdev);
+
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+
+done_free:
+	kfree(rdev);
+	dev->dev_private = NULL;
+}
+
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+	struct radeon_device *rdev;
+	int r, acpi_status;
+
+	if (!radeon_si_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(dev->dev,
+				 "SI support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+	if (!radeon_cik_support) {
+		switch (flags & RADEON_FAMILY_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(dev->dev,
+				 "CIK support disabled by module param\n");
+			return -ENODEV;
+		}
+	}
+
+	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
+	if (rdev == NULL) {
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)rdev;
+
+	/* update BUS flag */
+	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
+		flags |= RADEON_IS_AGP;
+	} else if (pci_is_pcie(dev->pdev)) {
+		flags |= RADEON_IS_PCIE;
+	} else {
+		flags |= RADEON_IS_PCI;
+	}
+
+	if ((radeon_runtime_pm != 0) &&
+	    radeon_has_atpx() &&
+	    ((flags & RADEON_IS_IGP) == 0) &&
+	    !pci_is_thunderbolt_attached(dev->pdev))
+		flags |= RADEON_IS_PX;
+
+	/* radeon_device_init should report only fatal error
+	 * like memory allocation failure or iomapping failure,
+	 * or memory manager initialization failure, it must
+	 * properly initialize the GPU MC controller and permit
+	 * VRAM allocation
+	 */
+	r = radeon_device_init(rdev, dev, dev->pdev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
+	}
+
+	/* Again modeset_init should fail only on fatal error
+	 * otherwise it should provide enough functionalities
+	 * for shadowfb to run
+	 */
+	r = radeon_modeset_init(rdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+
+	/* Call ACPI methods: require modeset init
+	 * but failure is not fatal
+	 */
+	if (!r) {
+		acpi_status = radeon_acpi_init(rdev);
+		if (acpi_status)
+			dev_dbg(&dev->pdev->dev,
+				"Error during ACPI methods call\n");
+	}
+
+	if (radeon_is_px(dev)) {
+		pm_runtime_use_autosuspend(dev->dev);
+		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+		pm_runtime_set_active(dev->dev);
+		pm_runtime_allow(dev->dev);
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
+	}
+
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+
+	return r;
+}
+
+/**
+ * radeon_set_filp_rights - Set filp rights.
+ *
+ * @dev: drm dev pointer
+ * @owner: drm file currently holding the rights
+ * @applier: drm file applying for the rights
+ * @value: 1 to request the rights, 0 to revoke them; on return, 1 if the
+ *         applier now holds the rights, 0 otherwise
+ *
+ * Sets the filp rights for the device (all asics).
+ */
+static void radeon_set_filp_rights(struct drm_device *dev,
+				   struct drm_file **owner,
+				   struct drm_file *applier,
+				   uint32_t *value)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	mutex_lock(&rdev->gem.mutex);
+	if (*value == 1) {
+		/* wants rights */
+		if (!*owner)
+			*owner = applier;
+	} else if (*value == 0) {
+		/* revokes rights */
+		if (*owner == applier)
+			*owner = NULL;
+	}
+	*value = *owner == applier ? 1 : 0;
+	mutex_unlock(&rdev->gem.mutex);
+}
+
+/*
+ * Userspace get information ioctl
+ */
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @dev: drm device pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers.  Examples include: pci device id, pipeline params, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_info *info = data;
+	struct radeon_mode_info *minfo = &rdev->mode_info;
+	uint32_t *value, value_tmp, *value_ptr, value_size;
+	uint64_t value64;
+	struct drm_crtc *crtc;
+	int i, found;
+
+	value_ptr = (uint32_t *)((unsigned long)info->value);
+	value = &value_tmp;
+	value_size = sizeof(uint32_t);
+
+	switch (info->request) {
+	case RADEON_INFO_DEVICE_ID:
+		*value = dev->pdev->device;
+		break;
+	case RADEON_INFO_NUM_GB_PIPES:
+		*value = rdev->num_gb_pipes;
+		break;
+	case RADEON_INFO_NUM_Z_PIPES:
+		*value = rdev->num_z_pipes;
+		break;
+	case RADEON_INFO_ACCEL_WORKING:
+		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+			*value = false;
+		else
+			*value = rdev->accel_working;
+		break;
+	case RADEON_INFO_CRTC_FROM_ID:
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+			crtc = (struct drm_crtc *)minfo->crtcs[i];
+			if (crtc && crtc->base.id == *value) {
+				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+				*value = radeon_crtc->crtc_id;
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_ACCEL_WORKING2:
+		if (rdev->family == CHIP_HAWAII) {
+			if (rdev->accel_working) {
+				if (rdev->new_fw)
+					*value = 3;
+				else
+					*value = 2;
+			} else {
+				*value = 0;
+			}
+		} else {
+			*value = rdev->accel_working;
+		}
+		break;
+	case RADEON_INFO_TILING_CONFIG:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.tile_config;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.tile_config;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.tile_config;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.tile_config;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.tile_config;
+		else {
+			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_WANT_HYPERZ:
+		/* The "value" here is both an input and output parameter.
+		 * If the input value is 1, filp requests hyper-z access.
+		 * If the input value is 0, filp revokes its hyper-z access.
+		 *
+		 * When returning, the value is 1 if filp owns hyper-z access,
+		 * 0 otherwise. */
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		if (*value >= 2) {
+			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
+		break;
+	case RADEON_INFO_WANT_CMASK:
+		/* The same logic as Hyper-Z. */
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		if (*value >= 2) {
+			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
+		break;
+	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+		/* return clock value in KHz */
+		if (rdev->asic->get_xclk)
+			*value = radeon_get_xclk(rdev) * 10;
+		else
+			*value = rdev->clock.spll.reference_freq * 10;
+		break;
+	case RADEON_INFO_NUM_BACKENDS:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.max_backends_per_se *
+				rdev->config.cik.max_shader_engines;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_backends_per_se *
+				rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_backends_per_se *
+				rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_backends;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_backends;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_backends;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_NUM_TILE_PIPES:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.max_tile_pipes;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_tile_pipes;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_tile_pipes;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_tile_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_tile_pipes;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_tile_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_FUSION_GART_WORKING:
+		*value = 1;
+		break;
+	case RADEON_INFO_BACKEND_MAP:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.backend_map;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.backend_map;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.backend_map;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.backend_map;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.backend_map;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.backend_map;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_VA_START:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		*value = RADEON_VA_RESERVED_SIZE;
+		break;
+	case RADEON_INFO_IB_VM_MAX_SIZE:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		*value = RADEON_IB_VM_MAX_SIZE;
+		break;
+	case RADEON_INFO_MAX_PIPES:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.max_cu_per_sh;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_cu_per_sh;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_pipes_per_simd;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_pipes;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_TIMESTAMP:
+		if (rdev->family < CHIP_R600) {
+			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+			return -EINVAL;
+		}
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = radeon_get_gpu_clock_counter(rdev);
+		break;
+	case RADEON_INFO_MAX_SE:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.max_shader_engines;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.num_ses;
+		else
+			*value = 1;
+		break;
+	case RADEON_INFO_MAX_SH_PER_SE:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.max_sh_per_se;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_sh_per_se;
+		else
+			return -EINVAL;
+		break;
+	case RADEON_INFO_FASTFB_WORKING:
+		*value = rdev->fastfb_working;
+		break;
+	case RADEON_INFO_RING_WORKING:
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		switch (*value) {
+		case RADEON_CS_RING_GFX:
+		case RADEON_CS_RING_COMPUTE:
+			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+			break;
+		case RADEON_CS_RING_DMA:
+			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+			break;
+		case RADEON_CS_RING_UVD:
+			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+			break;
+		case RADEON_CS_RING_VCE:
+			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_SI_TILE_MODE_ARRAY:
+		if (rdev->family >= CHIP_BONAIRE) {
+			value = rdev->config.cik.tile_mode_array;
+			value_size = sizeof(uint32_t)*32;
+		} else if (rdev->family >= CHIP_TAHITI) {
+			value = rdev->config.si.tile_mode_array;
+			value_size = sizeof(uint32_t)*32;
+		} else {
+			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+		if (rdev->family >= CHIP_BONAIRE) {
+			value = rdev->config.cik.macrotile_mode_array;
+			value_size = sizeof(uint32_t)*16;
+		} else {
+			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_SI_CP_DMA_COMPUTE:
+		*value = 1;
+		break;
+	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+		if (rdev->family >= CHIP_BONAIRE) {
+			*value = rdev->config.cik.backend_enable_mask;
+		} else if (rdev->family >= CHIP_TAHITI) {
+			*value = rdev->config.si.backend_enable_mask;
+		} else {
+			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_MAX_SCLK:
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+		    rdev->pm.dpm_enabled)
+			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+		else
+			*value = rdev->pm.default_sclk * 10;
+		break;
+	case RADEON_INFO_VCE_FW_VERSION:
+		*value = rdev->vce.fw_version;
+		break;
+	case RADEON_INFO_VCE_FB_VERSION:
+		*value = rdev->vce.fb_version;
+		break;
+	case RADEON_INFO_NUM_BYTES_MOVED:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->num_bytes_moved);
+		break;
+	case RADEON_INFO_VRAM_USAGE:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->vram_usage);
+		break;
+	case RADEON_INFO_GTT_USAGE:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->gtt_usage);
+		break;
+	case RADEON_INFO_ACTIVE_CU_COUNT:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.active_cus;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.active_cus;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.active_simds;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.active_simds;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.active_simds;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.active_simds;
+		else
+			*value = 1;
+		break;
+	case RADEON_INFO_CURRENT_GPU_TEMP:
+		/* get temperature in millidegrees C */
+		if (rdev->asic->pm.get_temperature)
+			*value = radeon_get_temperature(rdev);
+		else
+			*value = 0;
+		break;
+	case RADEON_INFO_CURRENT_GPU_SCLK:
+		/* get sclk in Mhz */
+		if (rdev->pm.dpm_enabled)
+			*value = radeon_dpm_get_current_sclk(rdev) / 100;
+		else
+			*value = rdev->pm.current_sclk / 100;
+		break;
+	case RADEON_INFO_CURRENT_GPU_MCLK:
+		/* get mclk in Mhz */
+		if (rdev->pm.dpm_enabled)
+			*value = radeon_dpm_get_current_mclk(rdev) / 100;
+		else
+			*value = rdev->pm.current_mclk / 100;
+		break;
+	case RADEON_INFO_READ_REG:
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		if (radeon_get_allowed_info_register(rdev, *value, value))
+			return -EINVAL;
+		break;
+	case RADEON_INFO_VA_UNMAP_WORKING:
+		*value = true;
+		break;
+	case RADEON_INFO_GPU_RESET_COUNTER:
+		*value = atomic_read(&rdev->gpu_reset_counter);
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+		return -EINVAL;
+	}
+	if (copy_to_user(value_ptr, (char*)value, value_size)) {
+		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+	return 0;
+}
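+
+/*
+ * Illustrative sketch (userspace side, assuming libdrm): querying the pci
+ * device id through this ioctl.  fd is an already-open radeon drm node;
+ * error handling is elided.
+ *
+ *	struct drm_radeon_info info = {};
+ *	uint32_t dev_id = 0;
+ *
+ *	info.request = RADEON_INFO_DEVICE_ID;
+ *	info.value = (uintptr_t)&dev_id;
+ *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
+ */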
+
+/*
+ * Outdated mess for old drm with Xorg being in charge (void function now).
+ */
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga_switcheroo state after last close (all asics).
+ */
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+	drm_fb_helper_lastclose(dev);
+	vga_switcheroo_process_delayed_switch();
+}
+
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int r;
+
+	file_priv->driver_priv = NULL;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
+
+	/* newer gpus have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN) {
+		struct radeon_fpriv *fpriv;
+		struct radeon_vm *vm;
+
+		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+		if (unlikely(!fpriv)) {
+			r = -ENOMEM;
+			goto out_suspend;
+		}
+
+		if (rdev->accel_working) {
+			vm = &fpriv->vm;
+			r = radeon_vm_init(rdev, vm);
+			if (r) {
+				kfree(fpriv);
+				goto out_suspend;
+			}
+
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (r) {
+				radeon_vm_fini(rdev, vm);
+				kfree(fpriv);
+				goto out_suspend;
+			}
+
+			/* map the ib pool buffer read only into
+			 * virtual address space */
+			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+							rdev->ring_tmp_bo.bo);
+			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+						  RADEON_VA_IB_OFFSET,
+						  RADEON_VM_PAGE_READABLE |
+						  RADEON_VM_PAGE_SNOOPED);
+			if (r) {
+				radeon_vm_fini(rdev, vm);
+				kfree(fpriv);
+				goto out_suspend;
+			}
+		}
+		file_priv->driver_priv = fpriv;
+	}
+
+out_suspend:
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return r;
+}
+
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).  And tear down vm on cayman+ (all asics).
+ */
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	pm_runtime_get_sync(dev->dev);
+
+	mutex_lock(&rdev->gem.mutex);
+	if (rdev->hyperz_filp == file_priv)
+		rdev->hyperz_filp = NULL;
+	if (rdev->cmask_filp == file_priv)
+		rdev->cmask_filp = NULL;
+	mutex_unlock(&rdev->gem.mutex);
+
+	radeon_uvd_free_handles(rdev, file_priv);
+	radeon_vce_free_handles(rdev, file_priv);
+
+	/* newer gpus have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_vm *vm = &fpriv->vm;
+		int r;
+
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (!r) {
+				if (vm->ib_bo_va)
+					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
+				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			}
+			radeon_vm_fini(rdev, vm);
+		}
+
+		kfree(fpriv);
+		file_priv->driver_priv = NULL;
+	}
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+}
+
+/*
+ * VBlank related functions.
+ */
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @pipe: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+{
+	int vpos, hpos, stat;
+	u32 count;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (pipe >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
+		return -EINVAL;
+	}
+
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (rdev->mode_info.crtcs[pipe]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = radeon_get_vblank_counter(rdev, pipe);
+			/* Ask radeon_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = radeon_get_crtc_scanoutpos(
+				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&rdev->mode_info.crtcs[pipe]->base.hwmode);
+		} while (count != radeon_get_vblank_counter(rdev, pipe));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		} else {
+			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
+				      pipe, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	} else {
+		/* Fallback to use value as is. */
+		count = radeon_get_vblank_counter(rdev, pipe);
+		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
+}
+
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	int r;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.crtc_vblank_int[crtc] = true;
+	r = radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	return r;
+}
+
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return;
+	}
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.crtc_vblank_int[crtc] = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
+	/* KMS */
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 0000000..35a205a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1121 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
+static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
+				       struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int xres = mode->hdisplay;
+	int yres = mode->vdisplay;
+	bool hscale = true, vscale = true;
+	int hsync_wid;
+	int vsync_wid;
+	int hsync_start;
+	int blank_width;
+	u32 scale, inc, crtc_more_cntl;
+	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
+	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
+	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
+	struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+		(RADEON_VERT_STRETCH_RESERVED |
+		 RADEON_VERT_AUTO_RATIO_INC);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+		(RADEON_HORZ_FP_LOOP_STRETCH |
+		 RADEON_HORZ_AUTO_RATIO_INC);
+
+	crtc_more_cntl = 0;
+	if ((rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		/* This is to work around an asic bug for RMX; some versions
+		 * of the BIOS don't have this register initialized correctly.
+		 */
+		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+	}
+
+	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+			      | ((hsync_wid & 0x3f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				 ? RADEON_CRTC_H_SYNC_POL
+				 : 0));
+
+	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+			      | ((vsync_wid & 0x1f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				 ? RADEON_CRTC_V_SYNC_POL
+				 : 0));
+
+	fp_horz_vert_active = 0;
+
+	if (native_mode->hdisplay == 0 ||
+	    native_mode->vdisplay == 0) {
+		hscale = false;
+		vscale = false;
+	} else {
+		if (xres > native_mode->hdisplay)
+			xres = native_mode->hdisplay;
+		if (yres > native_mode->vdisplay)
+			yres = native_mode->vdisplay;
+
+		if (xres == native_mode->hdisplay)
+			hscale = false;
+		if (yres == native_mode->vdisplay)
+			vscale = false;
+	}
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_FULL:
+	case RMX_ASPECT:
+		if (!hscale)
+			fp_horz_stretch |= ((xres/8-1) << 16);
+		else {
+			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+				/ native_mode->hdisplay + 1;
+			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+					RADEON_HORZ_STRETCH_BLEND |
+					RADEON_HORZ_STRETCH_ENABLE |
+					((native_mode->hdisplay/8-1) << 16));
+		}
+
+		if (!vscale)
+			fp_vert_stretch |= ((yres-1) << 12);
+		else {
+			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+				/ native_mode->vdisplay + 1;
+			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+					RADEON_VERT_STRETCH_ENABLE |
+					RADEON_VERT_STRETCH_BLEND |
+					((native_mode->vdisplay-1) << 12));
+		}
+		break;
+	case RMX_CENTER:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+
+		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+				RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+		if (blank_width > 110)
+			blank_width = 110;
+
+		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+		if (!hsync_wid)
+			hsync_wid = 1;
+
+		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+					? RADEON_CRTC_H_SYNC_POL
+					: 0));
+
+		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+		if (!vsync_wid)
+			vsync_wid = 1;
+
+		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+					| ((vsync_wid & 0x1f) << 16)
+					| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+						? RADEON_CRTC_V_SYNC_POL
+						: 0)));
+
+		fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+				(((native_mode->hdisplay / 8) & 0x1ff) << 16));
+		break;
+	case RMX_OFF:
+	default:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+		break;
+	}
+
+	WREG32(RADEON_FP_HORZ_STRETCH,      fp_horz_stretch);
+	WREG32(RADEON_FP_VERT_STRETCH,      fp_vert_stretch);
+	WREG32(RADEON_CRTC_MORE_CNTL,       crtc_more_cntl);
+	WREG32(RADEON_FP_HORZ_VERT_ACTIVE,  fp_horz_vert_active);
+	WREG32(RADEON_FP_H_SYNC_STRT_WID,   fp_h_sync_strt_wid);
+	WREG32(RADEON_FP_V_SYNC_STRT_WID,   fp_v_sync_strt_wid);
+	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+}
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips usually will pass at the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+			   RADEON_PPLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips usually will pass at the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			   RADEON_P2PLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+				       uint16_t fb_div)
+{
+	unsigned int vcoFreq;
+
+	if (!ref_div)
+		return 1;
+
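+	/* ref_freq is in 10 kHz units, matching the MHz ranges noted below */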
+	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+	/*
+	 * This is horribly crude: the VCO frequency range is divided into
+	 * 3 parts, each part having a fixed PLL gain value.
+	 */
+	if (vcoFreq >= 30000)
+		/*
+		 * [300..max] MHz : 7
+		 */
+		return 7;
+	else if (vcoFreq >= 18000)
+		/*
+		 * [180..300) MHz : 4
+		 */
+		return 4;
+	else
+		/*
+		 * [0..180) MHz : 1
+		 */
+		return 1;
+}
+
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = 0;
+	uint32_t mask;
+
+	if (radeon_crtc->crtc_id)
+		mask = (RADEON_CRTC2_DISP_DIS |
+			RADEON_CRTC2_VSYNC_DIS |
+			RADEON_CRTC2_HSYNC_DIS |
+			RADEON_CRTC2_DISP_REQ_EN_B);
+	else
+		mask = (RADEON_CRTC_DISPLAY_DIS |
+			RADEON_CRTC_VSYNC_DIS |
+			RADEON_CRTC_HSYNC_DIS);
+
+	/*
+	 * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
+	 * Therefore it is set in the DAC DPMS function.
+	 * This is different for GPUs with a single CRTC but a primary and a
+	 * TV DAC: here it controls the single CRTC no matter where it is
+	 * routed. Therefore we set it here.
+	 */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+									 RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+		}
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_crtc_vblank_on(crtc);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_crtc_vblank_off(crtc);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+										    RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
+		}
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		break;
+	}
+}
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			 struct drm_framebuffer *old_fb)
+{
+	return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, enum mode_set_atomic state)
+{
+	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+			 struct drm_framebuffer *fb,
+			 int x, int y, int atomic)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t base;
+	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+	uint32_t crtc_pitch, pitch_pixels;
+	uint32_t tiling_flags;
+	int format;
+	uint32_t gen_cntl_reg, gen_cntl_val;
+	int r;
+
+	DRM_DEBUG_KMS("\n");
+	/* no fb bound */
+	if (!atomic && !crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic)
+		target_fb = fb;
+	else
+		target_fb = crtc->primary->fb;
+
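+	/* map bits per pixel to the CRTC_GEN_CNTL pixel format code (bits 11:8) */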
+	switch (target_fb->format->cpp[0] * 8) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	/* Pin the framebuffer & get tiling information */
+	obj = target_fb->obj[0];
+	rbo = gem_to_radeon_bo(obj);
+retry:
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
+				     &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+
+		/* On old GPUs like the RN50 with little VRAM, pinning can fail
+		 * because the current fb is taking all the space needed. So
+		 * instead of unpinning the old buffer after pinning the new
+		 * one, first unpin the old one and then retry pinning the new
+		 * one.
+		 *
+		 * As only the master can set a mode, only the master can pin,
+		 * and it is unlikely the master client will race with itself,
+		 * especially on those old GPUs with a single CRTC.
+		 *
+		 * We don't shut down the display controller because the new
+		 * buffer will end up in the same spot.
+		 */
+		if (!atomic && fb && fb != crtc->primary->fb) {
+			struct radeon_bo *old_rbo;
+			unsigned long nsize, osize;
+
+			old_rbo = gem_to_radeon_bo(fb->obj[0]);
+			osize = radeon_bo_size(old_rbo);
+			nsize = radeon_bo_size(rbo);
+			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+				radeon_bo_unpin(old_rbo);
+				radeon_bo_unreserve(old_rbo);
+				fb = NULL;
+				goto retry;
+			}
+		}
+		return -EINVAL;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
+	/* if scanout was in GTT this really wouldn't work */
+	/* crtc offset is from display base addr not FB location */
+	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
+
+	base -= radeon_crtc->legacy_display_base_addr;
+
+	crtc_offset_cntl = 0;
+
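+	/* pitch is in 8-pixel units, mirrored into the upper half of CRTC_PITCH */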
+	pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
+	crtc_pitch = DIV_ROUND_UP(pitch_pixels * target_fb->format->cpp[0] * 8,
+				  target_fb->format->cpp[0] * 8 * 8);
+	crtc_pitch |= crtc_pitch << 16;
+
+	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					     R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+					      R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					      R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev)) {
+			crtc_tile_x0_y0 = x | (y << 16);
+			base &= ~0x7ff;
+		} else {
+			int byteshift = target_fb->format->cpp[0] * 8 >> 4;
+			int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
+			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+			crtc_offset_cntl |= (y % 16);
+		}
+	} else {
+		int offset = y * pitch_pixels + x;
+		switch (target_fb->format->cpp[0] * 8) {
+		case 8:
+			offset *= 1;
+			break;
+		case 15:
+		case 16:
+			offset *= 2;
+			break;
+		case 24:
+			offset *= 3;
+			break;
+		case 32:
+			offset *= 4;
+			break;
+		default:
+			return false;
+		}
+		base += offset;
+	}
+
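+	/* CRTC_OFFSET must be 8-byte aligned; drop the low three bits */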
+	base &= ~7;
+
+	if (radeon_crtc->crtc_id == 1)
+		gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
+	else
+		gen_cntl_reg = RADEON_CRTC_GEN_CNTL;
+
+	gen_cntl_val = RREG32(gen_cntl_reg);
+	gen_cntl_val &= ~(0xf << 8);
+	gen_cntl_val |= (format << 8);
+	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(gen_cntl_reg, gen_cntl_val);
+
+	crtc_offset = (u32)base;
+
+	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (radeon_crtc->crtc_id)
+			WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+		else
+			WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+	}
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+	if (!atomic && fb && fb != crtc->primary->fb) {
+		rbo = gem_to_radeon_bo(fb->obj[0]);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	const struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_encoder *encoder;
+	int format;
+	int hsync_start;
+	int hsync_wid;
+	int vsync_wid;
+	uint32_t crtc_h_total_disp;
+	uint32_t crtc_h_sync_strt_wid;
+	uint32_t crtc_v_total_disp;
+	uint32_t crtc_v_sync_strt_wid;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
+				break;
+			}
+		}
+	}
+
+	switch (fb->format->cpp[0] * 8) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				   ? RADEON_CRTC_H_SYNC_POL
+				   : 0));
+
+	/* This works for double scan mode. */
+	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+			     | ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+				| ((vsync_wid & 0x1f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				   ? RADEON_CRTC_V_SYNC_POL
+				   : 0));
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t crtc2_gen_cntl;
+		uint32_t disp2_merge_cntl;
+
+		/* if the TV DAC is enabled for another CRTC, keep it enabled */
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
+		crtc2_gen_cntl |= ((format << 8)
+				   | RADEON_CRTC2_VSYNC_DIS
+				   | RADEON_CRTC2_HSYNC_DIS
+				   | RADEON_CRTC2_DISP_DIS
+				   | RADEON_CRTC2_DISP_REQ_EN_B
+				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				      ? RADEON_CRTC2_DBL_SCAN_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				      ? RADEON_CRTC2_CSYNC_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				      ? RADEON_CRTC2_INTERLACE_EN
+				      : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
+		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
+	} else {
+		uint32_t crtc_gen_cntl;
+		uint32_t crtc_ext_cntl;
+		uint32_t disp_merge_cntl;
+
+		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
+				 | (format << 8)
+				 | RADEON_CRTC_DISP_REQ_EN_B
+				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				    ? RADEON_CRTC_DBL_SCAN_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				    ? RADEON_CRTC_CSYNC_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				    ? RADEON_CRTC_INTERLACE_EN
+				    : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc_gen_cntl |= RADEON_CRTC_EN;
+
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+				  RADEON_CRTC_VSYNC_DIS |
+				  RADEON_CRTC_HSYNC_DIS |
+				  RADEON_CRTC_DISPLAY_DIS);
+
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	}
+
+	if (is_tv)
+		radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
+						 &crtc_h_sync_strt_wid, &crtc_v_total_disp,
+						 &crtc_v_sync_strt_wid);
+
+	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+	return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_encoder *encoder;
+	uint32_t feedback_div = 0;
+	uint32_t frac_fb_div = 0;
+	uint32_t reference_div = 0;
+	uint32_t post_divider = 0;
+	uint32_t freq = 0;
+	uint8_t pll_gain;
+	bool use_bios_divs = false;
+	/* PLL registers */
+	uint32_t pll_ref_div = 0;
+	uint32_t pll_fb_post_div = 0;
+	uint32_t htotal_cntl = 0;
+	bool is_tv = false;
+	struct radeon_pll *pll;
+
+	struct {
+		int divider;
+		int bitvalue;
+	} *post_div, post_divs[]   = {
+		/* From RAGE 128 VR/RAGE 128 GL Register
+		 * Reference Manual (Technical Reference
+		 * Manual P/N RRG-G04100-C Rev. 0.04), page
+		 * 3-17 (PLL_DIV_[3:0]).
+		 */
+		{  1, 0 },              /* VCLK_SRC                 */
+		{  2, 1 },              /* VCLK_SRC/2               */
+		{  4, 2 },              /* VCLK_SRC/4               */
+		{  8, 3 },              /* VCLK_SRC/8               */
+		{  3, 4 },              /* VCLK_SRC/3               */
+		{ 16, 5 },              /* VCLK_SRC/16              */
+		{  6, 6 },              /* VCLK_SRC/6               */
+		{ 12, 7 },              /* VCLK_SRC/12              */
+		{  0, 0 }
+	};
+
+	if (radeon_crtc->crtc_id)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	pll->flags = RADEON_PLL_LEGACY;
+
+	if (mode->clock > 200000) /* range limits??? */
+		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+	else
+		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				break;
+			}
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+				if (!rdev->is_atom_bios) {
+					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+					if (lvds) {
+						if (lvds->use_bios_dividers) {
+							pll_ref_div = lvds->panel_ref_divider;
+							pll_fb_post_div   = (lvds->panel_fb_divider |
+									     (lvds->panel_post_divider << 16));
+							htotal_cntl  = 0;
+							use_bios_divs = true;
+						}
+					}
+				}
+				pll->flags |= RADEON_PLL_USE_REF_DIV;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!use_bios_divs) {
+		radeon_compute_pll_legacy(pll, mode->clock,
+					  &freq, &feedback_div, &frac_fb_div,
+					  &reference_div, &post_divider);
+
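+		/* map the computed post divider back to its PLL_DIV_[3:0] encoding */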
+		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+			if (post_div->divider == post_divider)
+				break;
+		}
+
+		if (!post_div->divider)
+			post_div = &post_divs[0];
+
+		DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
+			  (unsigned)freq,
+			  feedback_div,
+			  reference_div,
+			  post_divider);
+
+		pll_ref_div   = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+		/* apparently programming this otherwise causes a hang??? */
+		if (info->MacModel == RADEON_MAC_IBOOK)
+			pll_fb_post_div = 0x000600ad;
+		else
+#endif
+			pll_fb_post_div     = (feedback_div | (post_div->bitvalue << 16));
+
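+		/* keep htotal modulo 8; the CRTC counts in 8-pixel units */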
+		htotal_cntl    = mode->htotal & 0x7;
+	}
+
+	pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+					   pll_ref_div & 0x3ff,
+					   pll_fb_post_div & 0x7ff);
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+					  ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+					 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+		if (is_tv) {
+			radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
+						     &pll_ref_div, &pll_fb_post_div,
+						     &pixclks_cntl);
+		}
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     RADEON_P2PLL_RESET
+			     | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			       | RADEON_P2PLL_PVG_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			     pll_ref_div,
+			     ~RADEON_P2PLL_REF_DIV_MASK);
+
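+		/* the feedback and post dividers share one register;
+		 * program each field under its own mask */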
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_FB0_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_POST0_DIV_MASK);
+
+		radeon_pll2_write_update(dev);
+		radeon_pll2_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     0,
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_SLEEP
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  (unsigned)pll_ref_div,
+			  (unsigned)pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_P2PLL_CNTL));
+		DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
+			  (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+			  (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+			  (unsigned)((pll_fb_post_div &
+				      RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	} else {
+		uint32_t pixclks_cntl;
+
+		if (is_tv) {
+			pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
+						     &pll_fb_post_div, &pixclks_cntl);
+		}
+
+		if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* A temporary workaround for the occasional blanking on certain
+			   laptop panels. This appears to be related to the PLL divider
+			   registers (failing to lock?). It occurs even when all dividers
+			   are the same as their old settings. In that case we really
+			   don't need to fiddle with the PLL registers; by skipping them
+			   we can avoid the blanking problem with some panels.
+			*/
+			if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+			    (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+						 (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+				WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+					 RADEON_PLL_DIV_SEL,
+					 ~(RADEON_PLL_DIV_SEL));
+				r100_pll_errata_after_index(rdev);
+				return;
+			}
+		}
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     RADEON_PPLL_RESET
+			     | RADEON_PPLL_ATOMIC_UPDATE_EN
+			     | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_PVG_MASK));
+
+		WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+			 RADEON_PLL_DIV_SEL,
+			 ~(RADEON_PLL_DIV_SEL));
+		r100_pll_errata_after_index(rdev);
+
+		if (ASIC_IS_R300(rdev) ||
+		    (rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+				/* When restoring console mode, use saved PPLL_REF_DIV
+				 * setting.
+				 */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     pll_ref_div,
+					     0);
+			} else {
+				/* R300 uses ref_div_acc field as real ref divider */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+					     ~R300_PPLL_REF_DIV_ACC_MASK);
+			}
+		} else
+			WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+				     pll_ref_div,
+				     ~RADEON_PPLL_REF_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_FB3_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_POST3_DIV_MASK);
+
+		radeon_pll_write_update(dev);
+		radeon_pll_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     0,
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_SLEEP
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  pll_ref_div,
+			  pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_PPLL_CNTL));
+		DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
+			  pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+			  pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+			  (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_PPLLCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+
+		if (is_tv)
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	}
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	return true;
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	/* TODO TV */
+	radeon_crtc_set_base(crtc, x, y, old_fb);
+	radeon_set_crtc_timing(crtc, adjusted_mode);
+	radeon_set_pll(crtc, adjusted_mode);
+	radeon_overscan_setup(crtc, adjusted_mode);
+	if (radeon_crtc->crtc_id == 0) {
+		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
+	} else {
+		if (radeon_crtc->rmx_type != RMX_OFF) {
+			/* FIXME: only the first CRTC has RMX; what should
+			 * we do?
+			 */
+			DRM_ERROR("Mode needs scaling, but only the first CRTC can do that.\n");
+		}
+	}
+	radeon_cursor_reset(crtc);
+	return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * The hardware wedges sometimes if you reconfigure one CRTC
+	 * whilst another is running (see fdo bug #24611).
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * Re-enable the CRTCs that should be running.
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+		if (crtci->enabled)
+			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+	}
+}
+
+static void radeon_crtc_disable(struct drm_crtc *crtc)
+{
+	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct radeon_bo *rbo;
+
+		rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
+	}
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+	.dpms = radeon_crtc_dpms,
+	.mode_fixup = radeon_crtc_mode_fixup,
+	.mode_set = radeon_crtc_mode_set,
+	.mode_set_base = radeon_crtc_set_base,
+	.mode_set_base_atomic = radeon_crtc_set_base_atomic,
+	.prepare = radeon_crtc_prepare,
+	.commit = radeon_crtc_commit,
+	.disable = radeon_crtc_disable
+};
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	if (radeon_crtc->crtc_id == 1)
+		radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 0000000..222a1fa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1817 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include <linux/backlight.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
+static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+
+	encoder_funcs = encoder->helper_private;
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	radeon_encoder->active_device = 0;
+}
+
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+	int panel_pwr_delay = 2000;
+	bool is_mac = false;
+	uint8_t backlight_level;
+	DRM_DEBUG_KMS("\n");
+
+	lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		}
+	}
+
+	/* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
+	 * Taken from radeonfb.
+	 */
+	if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+		is_mac = true;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
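+		/* power-up: enable auto power-up and the LVDS PLL, then
+		 * release the PLL reset before turning the panel on */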
+		disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+		disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+		WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+		mdelay(1);
+
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+		lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+				   RADEON_LVDS_BL_MOD_LEVEL_MASK);
+		lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+				  RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
+		if (is_mac)
+			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+		lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+		if (is_mac) {
+			lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+		} else {
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+		}
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+		mdelay(panel_pwr_delay);
+		break;
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DRM_DEBUG("\n");
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		}
+	}
+
+	radeon_legacy_lvds_update(encoder, mode);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+	if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+		 * we need to call that on resume to set up the reg properly.
+		 */
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	} else {
+		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+		if (lvds) {
+			DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+			lvds_gen_cntl = lvds->lvds_gen_cntl;
+			lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					      (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+			lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+		} else
+			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	}
+	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+			   RADEON_LVDS_BLON |
+			   RADEON_LVDS_EN |
+			   RADEON_LVDS_RST_FM);
+
+	if (ASIC_IS_R300(rdev))
+		lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev)) {
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+		} else
+			lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+		else
+			lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+	WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+	WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+	if (rdev->family == CHIP_RV410)
+		WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+	.dpms = radeon_legacy_lvds_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_lvds_prepare,
+	.mode_set = radeon_legacy_lvds_mode_set,
+	.commit = radeon_legacy_lvds_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int dpms_mode = DRM_MODE_DPMS_ON;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		}
+	}
+
+	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	uint8_t level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	if (pdata->negative)
+		level = RADEON_MAX_BL_LEVEL - level;
+
+	return level;
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	radeon_legacy_set_backlight_level(radeon_encoder,
+					  radeon_legacy_lvds_level(bd));
+
+	return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+	.get_brightness = radeon_legacy_backlight_get_brightness,
+	.update_status	= radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+				  struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	uint8_t backlight_level;
+	char bl_name[16];
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+	if (!pmac_has_backlight_type("ati") &&
+	    !pmac_has_backlight_type("mnca"))
+		return;
+#endif
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
+				       pdata, &radeon_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	/* First, try to detect backlight level sense based on the assumption
+	 * that firmware set it up at full brightness
+	 */
+	if (backlight_level == 0)
+		pdata->negative = true;
+	else if (backlight_level == 0xff)
+		pdata->negative = false;
+	else {
+		/* XXX hack... maybe some day we can figure out in what direction
+		 * backlight should work on a given panel?
+		 */
+		pdata->negative = (rdev->family != CHIP_RV200 &&
+				   rdev->family != CHIP_RV250 &&
+				   rdev->family != CHIP_RV280 &&
+				   rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+		pdata->negative = (pdata->negative ||
+				   of_machine_is_compatible("PowerBook4,3") ||
+				   of_machine_is_compatible("PowerBook6,3") ||
+				   of_machine_is_compatible("PowerBook6,5"));
+#endif
+	}
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	}
+
+	bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon legacy LVDS backlight initialized\n");
+	rdev->mode_info.bl_encoder = radeon_encoder;
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	}
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+				  struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->enc_priv) {
+		radeon_legacy_backlight_exit(radeon_encoder);
+		kfree(radeon_encoder->enc_priv);
+	}
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+	.destroy = radeon_lvds_enc_destroy,
+};
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+	uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+		dac_cntl &= ~RADEON_DAC_PDWN;
+		dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+				    RADEON_DAC_PDWN_G |
+				    RADEON_DAC_PDWN_B);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+		dac_cntl |= RADEON_DAC_PDWN;
+		dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+				   RADEON_DAC_PDWN_G |
+				   RADEON_DAC_PDWN_B);
+		break;
+	}
+
+	/* handled in radeon_crtc_dpms() */
+	if (!(rdev->flags & RADEON_SINGLE_CRTC))
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+					       struct drm_display_mode *mode,
+					       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2)  & ~(RADEON_DAC2_DAC_CLK_SEL);
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	} else {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	}
+
+	dac_cntl = (RADEON_DAC_MASK_ALL |
+		    RADEON_DAC_VGA_ADR_EN |
+		    /* TODO 6-bits */
+		    RADEON_DAC_8BIT_EN);
+
+	WREG32_P(RADEON_DAC_CNTL,
+		       dac_cntl,
+		       RADEON_DAC_RANGE_CNTL |
+		       RADEON_DAC_BLANKING);
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+		dac_macro_cntl = p_dac->ps2_pdac_adj;
+	} else
+		dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+	dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+								  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+	uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+	enum drm_connector_status found = connector_status_disconnected;
+	bool color = true;
+
+	/* Just don't bother on RN50; those chips are often connected to remote
+	 * console hardware, and load detection often fails on them. So to make
+	 * everyone happy, report the encoder as always connected.
+	 */
+	if (ASIC_IS_RN50(rdev)) {
+		return connector_status_connected;
+	}
+
+	/* save the regs we need */
+	vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl = RREG32(RADEON_DAC_CNTL);
+	dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	tmp = vclk_ecp_cntl &
+		~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+	tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+	WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
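+	/* force known data onto the DAC and use its comparator to
+	 * detect an attached load */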
+	tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+		RADEON_DAC_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else if (ASIC_IS_RV100(rdev))
+		tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+	tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+	WREG32(RADEON_DAC_CNTL, tmp);
+
+	tmp = dac_macro_cntl;
+	tmp &= ~(RADEON_DAC_PDWN_R |
+		 RADEON_DAC_PDWN_G |
+		 RADEON_DAC_PDWN_B);
+
+	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+	mdelay(2);
+
+	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+		found = connector_status_connected;
+
+	/* restore the regs we used */
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+	.dpms = radeon_legacy_primary_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_primary_dac_prepare,
+	.mode_set = radeon_legacy_primary_dac_mode_set,
+	.commit = radeon_legacy_primary_dac_commit,
+	.detect = radeon_legacy_primary_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+	tmp &= 0xfffff;
+	if (rdev->family == CHIP_RV280) {
+		/* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+		tmp ^= (1 << 22);
+		tmds_pll_cntl ^= (1 << 22);
+	}
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
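+		/* use the first entry whose frequency bound covers the
+		 * pixel clock; freq == 0 terminates the table */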
+		for (i = 0; i < 4; i++) {
+			if (tmds->tmds_pll[i].freq == 0)
+				break;
+			if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+				tmp = tmds->tmds_pll[i].value;
+				break;
+			}
+		}
+	}
+
+	if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+		if (tmp & 0xfff00000)
+			tmds_pll_cntl = tmp;
+		else {
+			tmds_pll_cntl &= 0xfff00000;
+			tmds_pll_cntl |= tmp;
+		}
+	} else
+		tmds_pll_cntl = tmp;
+
+	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+		~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+	if (rdev->family == CHIP_R200 ||
+	    rdev->family == CHIP_R100 ||
+	    ASIC_IS_R300(rdev))
+		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+	else /* RV chips got this bit reversed */
+		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+		      (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+		       RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+	fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+			 RADEON_FP_DFP_SYNC_SEL |
+			 RADEON_FP_CRT_SYNC_SEL |
+			 RADEON_FP_CRTC_LOCK_8DOT |
+			 RADEON_FP_USE_SHADOW_EN |
+			 RADEON_FP_CRTC_USE_SHADOW_VEND |
+			 RADEON_FP_CRT_SYNC_ALT);
+
+	if (1) /*  FIXME rgbBits == 8 */
+		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
+	else
+		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+			else
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+		} else
+			fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+		} else
+			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+	.dpms = radeon_legacy_tmds_int_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_int_prepare,
+	.mode_set = radeon_legacy_tmds_int_mode_set,
+	.commit = radeon_legacy_tmds_int_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (rdev->is_atom_bios) {
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	} else {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+		if (1) /*  FIXME rgbBits == 8 */
+			fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
+		else
+			fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */
+
+		fp2_gen_cntl &= ~(RADEON_FP2_ON |
+				  RADEON_FP2_DVO_EN |
+				  RADEON_FP2_DVO_RATE_SEL_SDR);
+
+		/* XXX: these are OEM specific */
+		if (ASIC_IS_R300(rdev)) {
+			if ((dev->pdev->device == 0x4850) &&
+			    (dev->pdev->subsystem_vendor == 0x1028) &&
+			    (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+				fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+			else
+				fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+			/*if (mode->clock > 165000)
+			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+		}
+		if (!radeon_combios_external_tmds_setup(encoder))
+			radeon_external_tmds_setup(encoder);
+	}
+
+	if (radeon_crtc->crtc_id == 0) {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+			else
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+		} else
+			fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+	} else {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+		} else
+			fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+	.dpms = radeon_legacy_tmds_ext_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_ext_prepare,
+	.mode_set = radeon_legacy_tmds_ext_mode_set,
+	.commit = radeon_legacy_tmds_ext_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+	.destroy = radeon_ext_tmds_enc_destroy,
+};
+
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+	uint32_t tv_master_cntl = 0;
+	bool is_tv;
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family == CHIP_R200)
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	else {
+		if (is_tv)
+			tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+		else
+			crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (rdev->family == CHIP_R200) {
+			fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		} else {
+			if (is_tv)
+				tv_master_cntl |= RADEON_TV_ON;
+			else
+				crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+						 R420_TV_DAC_GDACPD |
+						 R420_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+						 RADEON_TV_DAC_GDACPD |
+						 RADEON_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (rdev->family == CHIP_R200)
+			fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		else {
+			if (is_tv)
+				tv_master_cntl &= ~RADEON_TV_ON;
+			else
+				crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+						R420_TV_DAC_GDACPD |
+						R420_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+						RADEON_TV_DAC_GDACPD |
+						RADEON_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	}
+
+	if (rdev->family == CHIP_R200) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	} else {
+		if (is_tv)
+			WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+		/* on single-CRTC boards CRT2 is handled in radeon_crtc_dpms() */
+		else if (!(rdev->flags & RADEON_SINGLE_CRTC))
+			WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+	uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family != CHIP_R200) {
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		if (rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423 ||
+		    rdev->family == CHIP_RV410) {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 R420_TV_DAC_DACADJ_MASK |
+					 R420_TV_DAC_RDACPD |
+					 R420_TV_DAC_GDACPD |
+					 R420_TV_DAC_BDACPD |
+					 R420_TV_DAC_TVENABLE);
+		} else {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 RADEON_TV_DAC_DACADJ_MASK |
+					 RADEON_TV_DAC_RDACPD |
+					 RADEON_TV_DAC_GDACPD |
+					 RADEON_TV_DAC_BDACPD);
+		}
+
+		tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+		if (is_tv) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M ||
+			    tv_dac->tv_std == TV_STD_PAL_60)
+				tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
+			else
+				tv_dac_cntl |= tv_dac->pal_tvdac_adj;
+
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J)
+				tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+			else
+				tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+		} else
+			tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
+					tv_dac->ps2_tvdac_adj);
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+		disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	} else if (rdev->family != CHIP_R200)
+		disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+	else if (rdev->family == CHIP_R200)
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+	if (rdev->family >= CHIP_R200)
+		disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
+	if (is_tv) {
+		uint32_t dac_cntl;
+
+		dac_cntl = RREG32(RADEON_DAC_CNTL);
+		dac_cntl &= ~RADEON_DAC_TVO_EN;
+		WREG32(RADEON_DAC_CNTL, dac_cntl);
+
+		if (ASIC_IS_R300(rdev))
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1;
+
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL;
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC |
+						     RADEON_DISP_TV_SOURCE_CRTC);
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+			}
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC;
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+			}
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	} else {
+
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+			} else
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+			} else
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	} else if (rdev->family != CHIP_R200)
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+	else if (rdev->family == CHIP_R200)
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->family >= CHIP_R200)
+		WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
+
+	if (is_tv)
+		radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
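+/*
+ * TV load detection on R3xx: force a known level out of the TV DAC with
+ * the monitor-detect comparator enabled, wait for the lines to settle,
+ * then sample the per-channel detect bits.  A load on green (GDACDET)
+ * indicates S-video, a load on blue (BDACDET) indicates composite.  All
+ * registers touched here are saved first and restored before returning.
+ */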
+static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
+				  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t disp_output_cntl, gpiopad_a, tmp;
+	bool found = false;
+
+	/* save regs needed */
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+
+	WREG32_P(RADEON_GPIOPAD_A, 0, ~1);
+
+	WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);
+
+	WREG32(RADEON_CRTC2_GEN_CNTL,
+	       RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);
+
+	tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+	tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+	WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+
+	WREG32(RADEON_DAC_EXT_CNTL,
+	       RADEON_DAC2_FORCE_BLANK_OFF_EN |
+	       RADEON_DAC2_FORCE_DATA_EN |
+	       RADEON_DAC_FORCE_DATA_SEL_RGB |
+	       (0xec << RADEON_DAC_FORCE_DATA_SHIFT));
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(4);
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_NBLANK |
+	       RADEON_TV_DAC_NHOLD |
+	       RADEON_TV_MONITOR_DETECT_EN |
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(6);
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+	return found;
+}
+
+static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tv_dac_cntl, dac_cntl2;
+	uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
+	bool found = false;
+
+	if (ASIC_IS_R300(rdev))
+		return r300_legacy_tv_detect(encoder, connector);
+
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	config_cntl = RREG32(RADEON_CONFIG_CNTL);
+	tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);
+
+	tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	tmp = tv_master_cntl | RADEON_TV_ON;
+	tmp &= ~(RADEON_TV_ASYNC_RST |
+		 RADEON_RESTART_PHASE_FIX |
+		 RADEON_CRT_FIFO_CE_EN |
+		 RADEON_TV_FIFO_CE_EN |
+		 RADEON_RE_SYNC_NOW_SEL_MASK);
+	tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
+	WREG32(RADEON_TV_MASTER_CNTL, tmp);
+
+	tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
+		(8 << RADEON_TV_DAC_BGADJ_SHIFT);
+
+	if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
+		tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
+	else
+		tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
+		RADEON_RED_MX_FORCE_DAC_DATA |
+		RADEON_GRN_MX_FORCE_DAC_DATA |
+		RADEON_BLU_MX_FORCE_DAC_DATA |
+		(0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);
+
+	mdelay(3);
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if (tmp & RADEON_TV_DAC_GDACDET) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	return found;
+}
+
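+/*
+ * R200 routes the secondary CRT through an external DVO DAC.  Detection
+ * drives fixed data through the graphics linear transform unit with a
+ * minimal CRTC2 timing, then polls the GPIO_MONID pad for up to roughly
+ * 200 ms for the level the external DAC reports when a monitor loads it.
+ */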
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+					 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+	uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+	uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+	uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+	uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+	bool found = false;
+	int i;
+
+	/* save the regs we need */
+	gpio_monid = RREG32(RADEON_GPIO_MONID);
+	fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+	disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+	disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+	disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+	disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+	disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+	crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+	crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+	crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+	crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+	tmp = RREG32(RADEON_GPIO_MONID);
+	tmp &= ~RADEON_GPIO_A_0;
+	WREG32(RADEON_GPIO_MONID, tmp);
+
+	WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+				     RADEON_FP2_PANEL_FORMAT |
+				     R200_FP2_SOURCE_SEL_TRANS_UNIT |
+				     RADEON_FP2_DVO_EN |
+				     R200_FP2_DVO_RATE_SEL_SDR));
+
+	WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+					 RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+	WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+				       RADEON_CRTC2_DISP_REQ_EN_B));
+
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
+	for (i = 0; i < 200; i++) {
+		tmp = RREG32(RADEON_GPIO_MONID);
+		if (tmp & RADEON_GPIO_Y_0)
+			found = true;
+
+		if (found)
+			break;
+
+		if (!drm_can_sleep())
+			mdelay(1);
+		else
+			msleep(1);
+	}
+
+	/* restore the regs we used */
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+	return found;
+}
+
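+/*
+ * Detection entry point for the TV DAC.  TV-class connectors are probed
+ * with the dedicated TV detect paths above; CRT probing forces a known
+ * RGB level through the DAC with the comparator enabled (DAC2_CMP_EN)
+ * and reads the compare output after a short settle time, restoring
+ * every register that was touched.
+ */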
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+							     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+	uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
+	enum drm_connector_status found = connector_status_disconnected;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	bool color = true;
+	struct drm_crtc *crtc;
+
+	/* find out if crtc2 is in use or if this encoder is using it */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+		if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+			if (encoder->crtc != crtc) {
+				return connector_status_disconnected;
+			}
+		}
+	}
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
+		bool tv_detect;
+
+		if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
+			return connector_status_disconnected;
+
+		tv_detect = radeon_legacy_tv_detect(encoder, connector);
+		if (tv_detect && tv_dac)
+			found = connector_status_connected;
+		return found;
+	}
+
+	/* don't probe if the encoder is being used for something other than a CRT */
+	if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) {
+		DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device);
+		return connector_status_disconnected;
+	}
+
+	/* R200 uses an external DAC for secondary DAC */
+	if (rdev->family == CHIP_R200) {
+		if (radeon_legacy_ext_dac_detect(encoder, connector))
+			found = connector_status_connected;
+		return found;
+	}
+
+	/* save the regs we need */
+	pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	} else {
+		if (ASIC_IS_R300(rdev)) {
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+		} else {
+			disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		}
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+	tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
+			       | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+		WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+	} else {
+		tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+		tmp |= RADEON_CRTC2_CRT2_ON |
+			(2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+		WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+		if (ASIC_IS_R300(rdev)) {
+			WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+			tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+			tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+		} else {
+			tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+			WREG32(RADEON_DISP_HW_DEBUG, tmp);
+		}
+	}
+
+	tmp = RADEON_TV_DAC_NBLANK |
+		RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN |
+		RADEON_TV_DAC_STD_PS2;
+
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+		RADEON_DAC2_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	mdelay(10);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+			found = connector_status_connected;
+	} else {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+			found = connector_status_connected;
+	}
+
+	/* restore regs we used */
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	} else {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		if (ASIC_IS_R300(rdev)) {
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+			WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		} else {
+			WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		}
+	}
+
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+	.dpms = radeon_legacy_tv_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tv_dac_prepare,
+	.mode_set = radeon_legacy_tv_dac_mode_set,
+	.commit = radeon_legacy_tv_dac_commit,
+	.detect = radeon_legacy_tv_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_int_tmds *tmds = NULL;
+	bool ret;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	if (rdev->is_atom_bios)
+		ret = radeon_atombios_get_tmds_info(encoder, tmds);
+	else
+		ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_ext_tmds *tmds = NULL;
+	bool ret;
+
+	if (rdev->is_atom_bios)
+		return NULL;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
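+/*
+ * Register a legacy (pre-AVIVO) encoder: if the encoder enum was seen
+ * before, just OR in the newly supported device; otherwise allocate a
+ * radeon_encoder, pick the possible-CRTC mask and bind the helper and
+ * cleanup vtables matching the encoder type.
+ */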
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+	}
+
+	/* add a new one */
+	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		encoder->possible_crtcs = 0x1;
+	else
+		encoder->possible_crtcs = 0x3;
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		encoder->possible_crtcs = 0x1;
+		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs,
+				 DRM_MODE_ENCODER_LVDS, NULL);
+		drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+		radeon_encoder->rmx_type = RMX_FULL;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs,
+				 DRM_MODE_ENCODER_TMDS, NULL);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+		radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
+		drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs,
+				 DRM_MODE_ENCODER_TVDAC, NULL);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs,
+				 DRM_MODE_ENCODER_TMDS, NULL);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+		if (!rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
new file mode 100644
index 0000000..611cf93
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+
+/*
+ * Integrated TV out support based on the GATOS code by
+ * Federico Ulivi <fulivi@lycos.com>
+ */
+
+/*
+ * Limits of h/v positions (hPos & vPos)
+ */
+#define MAX_H_POSITION 5 /* Range: [-5..5], negative is on the left, 0 is default, positive is on the right */
+#define MAX_V_POSITION 5 /* Range: [-5..5], negative is up, 0 is default, positive is down */
+
+/*
+ * Unit for hPos (in TV clock periods)
+ */
+#define H_POS_UNIT 10
+
+/*
+ * Indexes in h. code timing table for horizontal line position adjustment
+ */
+#define H_TABLE_POS1 6
+#define H_TABLE_POS2 8
+
+/*
+ * Limits of hor. size (hSize)
+ */
+#define MAX_H_SIZE 5 /* Range: [-5..5], negative is smaller, positive is larger */
+
+/* tv standard constants */
+#define NTSC_TV_CLOCK_T 233
+#define NTSC_TV_VFTOTAL 1
+#define NTSC_TV_LINES_PER_FRAME 525
+#define NTSC_TV_ZERO_H_SIZE 479166
+#define NTSC_TV_H_SIZE_UNIT 9478
+
+#define PAL_TV_CLOCK_T 188
+#define PAL_TV_VFTOTAL 3
+#define PAL_TV_LINES_PER_FRAME 625
+#define PAL_TV_ZERO_H_SIZE 473200
+#define PAL_TV_H_SIZE_UNIT 9360
+
+/* tv pll settings for 27 MHz ref clk */
+#define NTSC_TV_PLL_M_27 22
+#define NTSC_TV_PLL_N_27 175
+#define NTSC_TV_PLL_P_27 5
+
+#define PAL_TV_PLL_M_27 113
+#define PAL_TV_PLL_N_27 668
+#define PAL_TV_PLL_P_27 3
+
+/* tv pll settings for 14 MHz ref clk */
+#define NTSC_TV_PLL_M_14 33
+#define NTSC_TV_PLL_N_14 693
+#define NTSC_TV_PLL_P_14 7
+
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
+#define VERT_LEAD_IN_LINES 2
+#define FRAC_BITS 0xe
+#define FRAC_MASK 0x3fff
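+
+/*
+ * The vertical scaler works in fixed point with FRAC_BITS (14)
+ * fractional bits; FRAC_MASK = (1 << FRAC_BITS) - 1 = 0x3fff.
+ */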
+
+struct radeon_tv_mode_constants {
+	uint16_t hor_resolution;
+	uint16_t ver_resolution;
+	enum radeon_tv_std standard;
+	uint16_t hor_total;
+	uint16_t ver_total;
+	uint16_t hor_start;
+	uint16_t hor_syncstart;
+	uint16_t ver_syncstart;
+	unsigned def_restart;
+	uint16_t crtcPLL_N;
+	uint8_t  crtcPLL_M;
+	uint8_t  crtcPLL_post_div;
+	unsigned pix_to_tv;
+};
+
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x003f,
+	0x0263,
+	0x0a24,
+	0x2a6b,
+	0x0a36,
+	0x126d, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1a8f, /* H_TABLE_POS2 */
+	0x1ec7,
+	0x3863,
+	0x1bfe,
+	0x1bfe,
+	0x1a2a,
+	0x1e95,
+	0x0e31,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200d,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1818,
+	0x21e3,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1817,
+	0x21d4,
+	0x0002,
+	0
+};
+
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x0058,
+	0x027c,
+	0x0a31,
+	0x2a77,
+	0x0a95,
+	0x124f, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1b22, /* H_TABLE_POS2 */
+	0x1ef9,
+	0x387c,
+	0x1bfe,
+	0x1bfe,
+	0x1b31,
+	0x1eb5,
+	0x0e43,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200c,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1821,
+	0x2240,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1822,
+	0x2230,
+	0x0002,
+	0
+};
+
+/**********************************************************************
+ *
+ * availableModes
+ *
+ * Table of all allowed modes for tv output
+ *
+ **********************************************************************/
+static const struct radeon_tv_mode_constants available_tv_modes[] = {
+	{   /* NTSC timing for 27 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		990,                /* horTotal */
+		740,                /* verTotal */
+		813,                /* horStart */
+		824,                /* horSyncStart */
+		632,                /* verSyncStart */
+		625592,             /* defRestart */
+		592,                /* crtcPLL_N */
+		91,                 /* crtcPLL_M */
+		4,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{   /* PAL timing for 27 MHz ref clk */
+		800,               /* horResolution */
+		600,               /* verResolution */
+		TV_STD_PAL,        /* standard */
+		1144,              /* horTotal */
+		706,               /* verTotal */
+		812,               /* horStart */
+		824,               /* horSyncStart */
+		669,               /* verSyncStart */
+		696700,            /* defRestart */
+		1382,              /* crtcPLL_N */
+		231,               /* crtcPLL_M */
+		4,                 /* crtcPLL_postDiv */
+		759,               /* pixToTV */
+	},
+	{   /* NTSC timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		1018,               /* horTotal */
+		727,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		630627,             /* defRestart */
+		347,                /* crtcPLL_N */
+		14,                 /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{ /* PAL timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_PAL,         /* standard */
+		1131,               /* horTotal */
+		742,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		708369,             /* defRestart */
+		211,                /* crtcPLL_N */
+		9,                  /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		759,                /* pixToTV */
+	},
+};
+
+#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
+
+static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(struct radeon_encoder *radeon_encoder,
+									    uint16_t *pll_ref_freq)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	if (pll_ref_freq)
+		*pll_ref_freq = pll->reference_freq;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[0];
+		else
+			const_ptr = &available_tv_modes[2];
+	} else {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[1];
+		else
+			const_ptr = &available_tv_modes[3];
+	}
+	return const_ptr;
+}
+
+static long YCOEF_value[5] = { 2, 2, 0, 4, 0 };
+static long YCOEF_EN_value[5] = { 1, 1, 0, 1, 0 };
+static long SLOPE_value[5] = { 1, 2, 2, 4, 8 };
+static long SLOPE_limit[5] = { 6, 5, 4, 3, 2 };
+
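+/*
+ * Poll for TV PLL lock: route the PLL test signal onto the debug mux,
+ * then repeatedly clear the test counter (byte 3 of CLOCK_CNTL_DATA,
+ * with CLOCK_CNTL_INDEX pointing at PLL_TEST_CNTL) and spin until it
+ * reaches cnt_threshold, n_tests times over.  The debug mux and PLL
+ * test register are restored afterwards.
+ */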
+static void radeon_wait_pll_lock(struct drm_encoder *encoder, unsigned n_tests,
+				 unsigned n_wait_loops, unsigned cnt_threshold)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t save_pll_test;
+	unsigned int i, j;
+
+	WREG32(RADEON_TEST_DEBUG_MUX, (RREG32(RADEON_TEST_DEBUG_MUX) & 0xffff60ff) | 0x100);
+	save_pll_test = RREG32_PLL(RADEON_PLL_TEST_CNTL);
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test & ~RADEON_PLL_MASK_READ_B);
+
+	WREG8(RADEON_CLOCK_CNTL_INDEX, RADEON_PLL_TEST_CNTL);
+	for (i = 0; i < n_tests; i++) {
+		WREG8(RADEON_CLOCK_CNTL_DATA + 3, 0);
+		for (j = 0; j < n_wait_loops; j++)
+			if (RREG8(RADEON_CLOCK_CNTL_DATA + 3) >= cnt_threshold)
+				break;
+	}
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test);
+	WREG32(RADEON_TEST_DEBUG_MUX, RREG32(RADEON_TEST_DEBUG_MUX) & 0xffffe0ff);
+}
+
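+/*
+ * Host FIFO write handshake: latch the data word, present the address,
+ * raise the write strobe, then poll (bounded at 10000 reads) until the
+ * hardware drops HOST_FIFO_WT_ACK before releasing the strobe.
+ */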
+static void radeon_legacy_tv_write_fifo(struct radeon_encoder *radeon_encoder,
+					uint16_t addr, uint32_t value)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
+	WREG32(RADEON_TV_HOST_WRITE_DATA, value);
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_WT);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_WT_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+}
+
+#if 0 /* included for completeness */
+static uint32_t radeon_legacy_tv_read_fifo(struct radeon_encoder *radeon_encoder, uint16_t addr)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_RD);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_RD_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+	return RREG32(RADEON_TV_HOST_READ_DATA);
+}
+#endif
+
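+/*
+ * The h/v timing code tables live in the TV host FIFO; TV_UV_ADR encodes
+ * their base addresses.  Horizontal words are written downwards from the
+ * address decoded here, vertical words upwards from the odd companion
+ * address (see radeon_restore_tv_timing_tables below).
+ */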
+static uint16_t radeon_get_htiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t h_table;
+
+	switch ((tv_uv_adr & RADEON_HCODE_TABLE_SEL_MASK) >> RADEON_HCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		h_table = RADEON_TV_MAX_FIFO_ADDR_INTERNAL;
+		break;
+	case 1:
+		h_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2;
+		break;
+	case 2:
+		h_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2;
+		break;
+	default:
+		h_table = 0;
+		break;
+	}
+	return h_table;
+}
+
+static uint16_t radeon_get_vtiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t v_table;
+
+	switch ((tv_uv_adr & RADEON_VCODE_TABLE_SEL_MASK) >> RADEON_VCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		v_table = ((tv_uv_adr & RADEON_MAX_UV_ADR_MASK) >> RADEON_MAX_UV_ADR_SHIFT) * 2 + 1;
+		break;
+	case 1:
+		v_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2 + 1;
+		break;
+	case 2:
+		v_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2 + 1;
+		break;
+	default:
+		v_table = 0;
+		break;
+	}
+	return v_table;
+}
+
+static void radeon_restore_tv_timing_tables(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint16_t h_table, v_table;
+	uint32_t tmp;
+	int i;
+
+	WREG32(RADEON_TV_UV_ADR, tv_dac->tv.tv_uv_adr);
+	h_table = radeon_get_htiming_tables_addr(tv_dac->tv.tv_uv_adr);
+	v_table = radeon_get_vtiming_tables_addr(tv_dac->tv.tv_uv_adr);
+
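+	/*
+	 * Each 32-bit FIFO word packs two 14-bit timing codes; a zero code
+	 * terminates the table.
+	 */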
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i += 2, h_table--) {
+		tmp = ((uint32_t)tv_dac->tv.h_code_timing[i] << 14) | ((uint32_t)tv_dac->tv.h_code_timing[i+1]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, h_table, tmp);
+		if (tv_dac->tv.h_code_timing[i] == 0 || tv_dac->tv.h_code_timing[i + 1] == 0)
+			break;
+	}
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i += 2, v_table++) {
+		tmp = ((uint32_t)tv_dac->tv.v_code_timing[i+1] << 14) | ((uint32_t)tv_dac->tv.v_code_timing[i]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, v_table, tmp);
+		if (tv_dac->tv.v_code_timing[i] == 0 || tv_dac->tv.v_code_timing[i + 1] == 0)
+			break;
+	}
+}
+
+static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	WREG32(RADEON_TV_FRESTART, tv_dac->tv.frestart);
+	WREG32(RADEON_TV_HRESTART, tv_dac->tv.hrestart);
+	WREG32(RADEON_TV_VRESTART, tv_dac->tv.vrestart);
+}
+
+static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc;
+	int restart;
+	unsigned int h_total, v_total, f_total;
+	int v_offset, h_offset;
+	u16 p1, p2, h_inc;
+	bool h_changed;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return false;
+
+	h_total = const_ptr->hor_total;
+	v_total = const_ptr->ver_total;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		f_total = NTSC_TV_VFTOTAL + 1;
+	else
+		f_total = PAL_TV_VFTOTAL + 1;
+
+	/* adjust positions 1&2 in hor. cod timing table */
+	h_offset = tv_dac->h_pos * H_POS_UNIT;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		h_offset -= 50;
+		p1 = hor_timing_NTSC[H_TABLE_POS1];
+		p2 = hor_timing_NTSC[H_TABLE_POS2];
+	} else {
+		p1 = hor_timing_PAL[H_TABLE_POS1];
+		p2 = hor_timing_PAL[H_TABLE_POS2];
+	}
+
+	p1 = (u16)((int)p1 + h_offset);
+	p2 = (u16)((int)p2 - h_offset);
+
+	h_changed = (p1 != tv_dac->tv.h_code_timing[H_TABLE_POS1] ||
+		     p2 != tv_dac->tv.h_code_timing[H_TABLE_POS2]);
+
+	tv_dac->tv.h_code_timing[H_TABLE_POS1] = p1;
+	tv_dac->tv.h_code_timing[H_TABLE_POS2] = p2;
+
+	/* Convert hOffset from n. of TV clock periods to n. of CRTC clock periods (CRTC pixels) */
+	h_offset = (h_offset * (int)(const_ptr->pix_to_tv)) / 1000;
+
+	/* adjust restart */
+	restart = const_ptr->def_restart;
+
+	/*
+	 * convert v_pos TV lines to n. of CRTC pixels
+	 */
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(NTSC_TV_LINES_PER_FRAME);
+	else
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(PAL_TV_LINES_PER_FRAME);
+
+	restart -= v_offset + h_offset;
+
+	DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+		  const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
+
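+	/*
+	 * restart is a single pixel count; split it mixed-radix into the
+	 * pixel/line/frame restart registers.  For example, the NTSC/27 MHz
+	 * entry above (def_restart = 625592, h_total = 990, v_total = 740)
+	 * with zero h/v position gives hrestart = 902, vrestart = 631,
+	 * frestart = 0.
+	 */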
+	tv_dac->tv.hrestart = restart % h_total;
+	restart /= h_total;
+	tv_dac->tv.vrestart = restart % v_total;
+	restart /= v_total;
+	tv_dac->tv.frestart = restart % f_total;
+
+	DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
+		  (unsigned)tv_dac->tv.frestart,
+		  (unsigned)tv_dac->tv.vrestart,
+		  (unsigned)tv_dac->tv.hrestart);
+
+	/* compute h_inc from hsize */
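+	/*
+	 * h_inc is a fixed-point increment with 12 fractional bits (the
+	 * 4096 factor): hor_resolution * TV_CLOCK_T divided by the desired
+	 * width term (ZERO_H_SIZE plus h_size steps of H_SIZE_UNIT).  A
+	 * larger h_size enlarges the denominator, giving a smaller
+	 * increment and hence a wider picture.
+	 */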
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M)
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * NTSC_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(NTSC_TV_H_SIZE_UNIT) + (int)(NTSC_TV_ZERO_H_SIZE)));
+	else
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * PAL_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(PAL_TV_H_SIZE_UNIT) + (int)(PAL_TV_ZERO_H_SIZE)));
+
+	tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
+		((u32)h_inc << RADEON_H_INC_SHIFT);
+
+	DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+
+	return h_changed;
+}
+
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+	uint16_t pll_ref_freq;
+	uint32_t vert_space, flicker_removal, tmp;
+	uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
+	uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
+	uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
+	uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+	uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
+	uint32_t m, n, p;
+	const uint16_t *hor_timing;
+	const uint16_t *vert_timing;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, &pll_ref_freq);
+	if (!const_ptr)
+		return;
+
+	radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	tv_master_cntl = (RADEON_VIN_ASYNC_RST |
+			  RADEON_CRT_FIFO_CE_EN |
+			  RADEON_TV_FIFO_CE_EN |
+			  RADEON_TV_ON);
+
+	if (!ASIC_IS_R300(rdev))
+		tv_master_cntl |= RADEON_TVCLK_ALWAYS_ONb;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_master_cntl |= RADEON_RESTART_PHASE_FIX;
+
+	tv_modulator_cntl1 = (RADEON_SLEW_RATE_LIMIT |
+			      RADEON_SYNC_TIP_LEVEL |
+			      RADEON_YFLT_EN |
+			      RADEON_UVFLT_EN |
+			      (6 << RADEON_CY_FILT_BLEND_SHIFT));
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		tv_modulator_cntl1 |= (0x46 << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-111 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else if (tv_dac->tv_std == TV_STD_SCART_PAL) {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN;
+		tv_modulator_cntl2 = (0 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN |
+			(0x3b << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-78 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((62 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	}
+
+	tv_rgb_cntl = (RADEON_RGB_DITHER_EN
+		       | RADEON_TVOUT_SCALE_EN
+		       | (0x0b << RADEON_UVRAM_READ_MARGIN_SHIFT)
+		       | (0x07 << RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT)
+		       | RADEON_RGB_ATTEN_SEL(0x3)
+		       | RADEON_RGB_ATTEN_VAL(0xc));
+
+	if (radeon_crtc->crtc_id == 1)
+		tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC2;
+	else {
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_RMX;
+		else
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC1;
+	}
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		vert_space = const_ptr->ver_total * 2 * 10000 / NTSC_TV_LINES_PER_FRAME;
+	else
+		vert_space = const_ptr->ver_total * 2 * 10000 / PAL_TV_LINES_PER_FRAME;
+
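+	/*
+	 * vert_space is the vertical scale ratio times 10000 (kept scaled
+	 * to survive the integer divide); it is converted to FRAC_BITS
+	 * fixed point when merged into TV_VSCALER_CNTL1 below.
+	 */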
+	tmp = RREG32(RADEON_TV_VSCALER_CNTL1);
+	tmp &= 0xe3ff0000;
+	tmp |= (vert_space * (1 << FRAC_BITS) / 10000);
+	tv_vscaler_cntl1 = tmp;
+
+	if (pll_ref_freq == 2700)
+		tv_vscaler_cntl1 |= RADEON_RESTART_FIELD;
+
+	if (const_ptr->hor_resolution == 1024)
+		tv_vscaler_cntl1 |= (4 << RADEON_Y_DEL_W_SIG_SHIFT);
+	else
+		tv_vscaler_cntl1 |= (2 << RADEON_Y_DEL_W_SIG_SHIFT);
+
+	/* scale up for int divide */
+	tmp = const_ptr->ver_total * 2 * 1000;
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tmp /= NTSC_TV_LINES_PER_FRAME;
+	} else {
+		tmp /= PAL_TV_LINES_PER_FRAME;
+	}
+	flicker_removal = (tmp + 500) / 1000;
+
+	if (flicker_removal < 3)
+		flicker_removal = 3;
+	for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
+		if (flicker_removal == SLOPE_limit[i])
+			break;
+	}
+
+	tv_y_saw_tooth_cntl = (vert_space * SLOPE_value[i] * (1 << (FRAC_BITS - 1)) +
+				5001) / 10000 / 8 | ((SLOPE_value[i] *
+				(1 << (FRAC_BITS - 1)) / 8) << 16);
+	tv_y_fall_cntl =
+		(YCOEF_EN_value[i] << 17) | ((YCOEF_value[i] * (1 << 8) / 8) << 24) |
+		RADEON_Y_FALL_PING_PONG | (272 * SLOPE_value[i] / 8) * (1 << (FRAC_BITS - 1)) /
+		1024;
+	tv_y_rise_cntl = RADEON_Y_RISE_PING_PONG|
+		(flicker_removal * 1024 - 272) * SLOPE_value[i] / 8 * (1 << (FRAC_BITS - 1)) / 1024;
+
+	tv_vscaler_cntl2 = RREG32(RADEON_TV_VSCALER_CNTL2) & 0x00fffff0;
+	tv_vscaler_cntl2 |= (0x10 << 24) |
+		RADEON_DITHER_MODE |
+		RADEON_Y_OUTPUT_DITHER_EN |
+		RADEON_UV_OUTPUT_DITHER_EN |
+		RADEON_UV_TO_BUF_DITHER_EN;
+
+	tmp = (tv_vscaler_cntl1 >> RADEON_UV_INC_SHIFT) & RADEON_UV_INC_MASK;
+	tmp = ((16384 * 256 * 10) / tmp + 5) / 10;
+	tmp = (tmp << RADEON_UV_OUTPUT_POST_SCALE_SHIFT) | 0x000b0000;
+	tv_dac->tv.timing_cntl = tmp;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		tv_dac_cntl = tv_dac->ntsc_tvdac_adj;
+	else
+		tv_dac_cntl = tv_dac->pal_tvdac_adj;
+
+	tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+	else
+		tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		if (pll_ref_freq == 2700) {
+			m = NTSC_TV_PLL_M_27;
+			n = NTSC_TV_PLL_N_27;
+			p = NTSC_TV_PLL_P_27;
+		} else {
+			m = NTSC_TV_PLL_M_14;
+			n = NTSC_TV_PLL_N_14;
+			p = NTSC_TV_PLL_P_14;
+		}
+	} else {
+		if (pll_ref_freq == 2700) {
+			m = PAL_TV_PLL_M_27;
+			n = PAL_TV_PLL_N_27;
+			p = PAL_TV_PLL_P_27;
+		} else {
+			m = PAL_TV_PLL_M_14;
+			n = PAL_TV_PLL_N_14;
+			p = PAL_TV_PLL_P_14;
+		}
+	}
+
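+	/*
+	 * Pack m, n and p into TV_PLL_CNTL; m and n are split across
+	 * low/high bit fields.  With these constants the TV clock comes
+	 * out at ref * n / (m * p), about twelve times the colour
+	 * subcarrier: for NTSC from a 27 MHz reference,
+	 * 27 * 175 / (22 * 5) = 42.95 MHz.
+	 */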
+	tv_pll_cntl = (m & RADEON_TV_M0LO_MASK) |
+		(((m >> 8) & RADEON_TV_M0HI_MASK) << RADEON_TV_M0HI_SHIFT) |
+		((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) |
+		(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
+		((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
+
+	tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
+			((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
+			((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
+			RADEON_TVCLK_SRC_SEL_TVPLL |
+			RADEON_TVPLL_TEST_DIS);
+
+	tv_dac->tv.tv_uv_adr = 0xc8;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tv_ftotal = NTSC_TV_VFTOTAL;
+		hor_timing = hor_timing_NTSC;
+		vert_timing = vert_timing_NTSC;
+	} else {
+		hor_timing = hor_timing_PAL;
+		vert_timing = vert_timing_PAL;
+		tv_ftotal = PAL_TV_VFTOTAL;
+	}
+
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
+		if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
+			break;
+	}
+
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
+		if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
+			break;
+	}
+
+	radeon_legacy_tv_init_restarts(encoder);
+
+	/* play with DAC_CNTL */
+	/* play with GPIOPAD_A */
+	/* DISP_OUTPUT_CNTL */
+	/* use reference freq */
+
+	/* program the TV registers */
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST | RADEON_TV_FIFO_ASYNC_RST));
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	tmp &= ~RADEON_TV_DAC_NBLANK;
+	tmp |= RADEON_TV_DAC_BGSLEEP |
+		RADEON_TV_DAC_RDACPD |
+		RADEON_TV_DAC_GDACPD |
+		RADEON_TV_DAC_BDACPD;
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	/* TV PLL */
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+	WREG32_PLL(RADEON_TV_PLL_CNTL, tv_pll_cntl);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVPLL_RESET, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 300, 160, 27);
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~0xf);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVCLK_SRC_SEL_TVPLL, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, (1 << RADEON_TVPDC_SHIFT), ~RADEON_TVPDC_MASK);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_SLEEP);
+
+	/* TV HV */
+	WREG32(RADEON_TV_RGB_CNTL, tv_rgb_cntl);
+	WREG32(RADEON_TV_HTOTAL, const_ptr->hor_total - 1);
+	WREG32(RADEON_TV_HDISP, const_ptr->hor_resolution - 1);
+	WREG32(RADEON_TV_HSTART, const_ptr->hor_start);
+
+	WREG32(RADEON_TV_VTOTAL, const_ptr->ver_total - 1);
+	WREG32(RADEON_TV_VDISP, const_ptr->ver_resolution - 1);
+	WREG32(RADEON_TV_FTOTAL, tv_ftotal);
+	WREG32(RADEON_TV_VSCALER_CNTL1, tv_vscaler_cntl1);
+	WREG32(RADEON_TV_VSCALER_CNTL2, tv_vscaler_cntl2);
+
+	WREG32(RADEON_TV_Y_FALL_CNTL, tv_y_fall_cntl);
+	WREG32(RADEON_TV_Y_RISE_CNTL, tv_y_rise_cntl);
+	WREG32(RADEON_TV_Y_SAW_TOOTH_CNTL, tv_y_saw_tooth_cntl);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST));
+
+	/* TV restarts */
+	radeon_legacy_write_tv_restarts(radeon_encoder);
+
+	/* tv timings */
+	radeon_restore_tv_timing_tables(radeon_encoder);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST));
+
+	/* tv std */
+	WREG32(RADEON_TV_SYNC_CNTL, (RADEON_SYNC_PUB | RADEON_TV_SYNC_IO_DRIVE));
+	WREG32(RADEON_TV_TIMING_CNTL, tv_dac->tv.timing_cntl);
+	WREG32(RADEON_TV_MODULATOR_CNTL1, tv_modulator_cntl1);
+	WREG32(RADEON_TV_MODULATOR_CNTL2, tv_modulator_cntl2);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, (RADEON_Y_RED_EN |
+					    RADEON_C_GRN_EN |
+					    RADEON_CMP_BLU_EN |
+					    RADEON_DAC_DITHER_EN));
+
+	WREG32(RADEON_TV_CRC_CNTL, 0);
+
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+
+	WREG32(RADEON_TV_GAIN_LIMIT_SETTINGS, ((0x17f << RADEON_UV_GAIN_LIMIT_SHIFT) |
+					       (0x5ff << RADEON_Y_GAIN_LIMIT_SHIFT)));
+	WREG32(RADEON_TV_LINEAR_GAIN_SETTINGS, ((0x100 << RADEON_UV_GAIN_SHIFT) |
+						(0x100 << RADEON_Y_GAIN_SHIFT)));
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+
+}
+
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+	uint32_t tmp;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
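+	/* horizontal CRTC timing is programmed in 8-pixel character units;
+	 * the sync start remainder goes into the low pixel bits
+	 */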
+	*h_total_disp = (((const_ptr->hor_resolution / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) |
+		(((const_ptr->hor_total / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT);
+
+	tmp = *h_sync_strt_wid;
+	tmp &= ~(RADEON_CRTC_H_SYNC_STRT_PIX | RADEON_CRTC_H_SYNC_STRT_CHAR);
+	tmp |= (((const_ptr->hor_syncstart / 8) - 1) << RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT) |
+		(const_ptr->hor_syncstart & 7);
+	*h_sync_strt_wid = tmp;
+
+	*v_total_disp = ((const_ptr->ver_resolution - 1) << RADEON_CRTC_V_DISP_SHIFT) |
+		((const_ptr->ver_total - 1) << RADEON_CRTC_V_TOTAL_SHIFT);
+
+	tmp = *v_sync_strt_wid;
+	tmp &= ~RADEON_CRTC_V_SYNC_STRT;
+	tmp |= ((const_ptr->ver_syncstart - 1) << RADEON_CRTC_V_SYNC_STRT_SHIFT);
+	*v_sync_strt_wid = tmp;
+}
+
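+/* map a post divider value to the encoding the PLL divider registers
+ * expect (written to bits 16+ of the divider words below); values not
+ * in the table fall back to the /16 encoding
+ */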
+static int get_post_div(int value)
+{
+	int post_div;
+	switch (value) {
+	case 1: post_div = 0; break;
+	case 2: post_div = 1; break;
+	case 3: post_div = 4; break;
+	case 4: post_div = 2; break;
+	case 6: post_div = 6; break;
+	case 8: post_div = 3; break;
+	case 12: post_div = 7; break;
+	case 16:
+	default: post_div = 5; break;
+	}
+	return post_div;
+}
+
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal_cntl = (const_ptr->hor_total & 0x7) | RADEON_HTOT_CNTL_VGA_EN;
+
+	*ppll_ref_div = const_ptr->crtcPLL_M;
+
+	*ppll_div_3 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~(RADEON_PIX2CLK_SRC_SEL_MASK | RADEON_PIXCLK_TV_SRC_SEL);
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK;
+}
+
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal2_cntl = (const_ptr->hor_total & 0x7);
+
+	*p2pll_ref_div = const_ptr->crtcPLL_M;
+
+	*p2pll_div_0 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~RADEON_PIX2CLK_SRC_SEL_MASK;
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK | RADEON_PIXCLK_TV_SRC_SEL;
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
new file mode 100644
index 0000000..f8b35df
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+
+struct radeon_mn {
+	/* constant after initialisation */
+	struct radeon_device	*rdev;
+	struct mm_struct	*mm;
+	struct mmu_notifier	mn;
+
+	/* only used on destruction */
+	struct work_struct	work;
+
+	/* protected by rdev->mn_lock */
+	struct hlist_node	node;
+
+	/* objects protected by lock */
+	struct mutex		lock;
+	struct rb_root_cached	objects;
+};
+
+struct radeon_mn_node {
+	struct interval_tree_node	it;
+	struct list_head		bos;
+};
+
+/**
+ * radeon_mn_destroy - destroy the rmn
+ *
+ * @work: previously scheduled work item
+ *
+ * Lazily destroys the notifier from a work item
+ */
+static void radeon_mn_destroy(struct work_struct *work)
+{
+	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
+	struct radeon_device *rdev = rmn->rdev;
+	struct radeon_mn_node *node, *next_node;
+	struct radeon_bo *bo, *next_bo;
+
+	mutex_lock(&rdev->mn_lock);
+	mutex_lock(&rmn->lock);
+	hash_del(&rmn->node);
+	rbtree_postorder_for_each_entry_safe(node, next_node,
+					     &rmn->objects.rb_root, it.rb) {
+
+		interval_tree_remove(&node->it, &rmn->objects);
+		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+			bo->mn = NULL;
+			list_del_init(&bo->mn_list);
+		}
+		kfree(node);
+	}
+	mutex_unlock(&rmn->lock);
+	mutex_unlock(&rdev->mn_lock);
+	mmu_notifier_unregister(&rmn->mn, rmn->mm);
+	kfree(rmn);
+}
+
+/**
+ * radeon_mn_release - callback to notify about mm destruction
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ *
+ * Schedule a work item to lazily destroy our notifier.
+ */
+static void radeon_mn_release(struct mmu_notifier *mn,
+			      struct mm_struct *mm)
+{
+	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+	INIT_WORK(&rmn->work, radeon_mn_destroy);
+	schedule_work(&rmn->work);
+}
+
+/**
+ * radeon_mn_invalidate_range_start - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ * @blockable: whether we are allowed to sleep
+ *
+ * We block until all BOs between start and end are idle, then unmap
+ * them by moving them back into the system domain.
+ */
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+					     struct mm_struct *mm,
+					     unsigned long start,
+					     unsigned long end,
+					     bool blockable)
+{
+	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+	struct ttm_operation_ctx ctx = { false, false };
+	struct interval_tree_node *it;
+	int ret = 0;
+
+	/* notification is exclusive, but interval is inclusive */
+	end -= 1;
+
+	/* TODO we should be able to split locking for interval tree and
+	 * the tear down.
+	 */
+	if (blockable)
+		mutex_lock(&rmn->lock);
+	else if (!mutex_trylock(&rmn->lock))
+		return -EAGAIN;
+
+	it = interval_tree_iter_first(&rmn->objects, start, end);
+	while (it) {
+		struct radeon_mn_node *node;
+		struct radeon_bo *bo;
+		long r;
+
+		if (!blockable) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+
+		node = container_of(it, struct radeon_mn_node, it);
+		it = interval_tree_iter_next(it, start, end);
+
+		list_for_each_entry(bo, &node->bos, mn_list) {
+
+			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+				continue;
+
+			r = radeon_bo_reserve(bo, true);
+			if (r) {
+				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+				continue;
+			}
+
+			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+				true, false, MAX_SCHEDULE_TIMEOUT);
+			if (r <= 0)
+				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			if (r)
+				DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+			radeon_bo_unreserve(bo);
+		}
+	}
+
+out_unlock:
+	mutex_unlock(&rmn->lock);
+
+	return ret;
+}
+
+static const struct mmu_notifier_ops radeon_mn_ops = {
+	.release = radeon_mn_release,
+	.invalidate_range_start = radeon_mn_invalidate_range_start,
+};
+
+/**
+ * radeon_mn_get - create notifier context
+ *
+ * @rdev: radeon device pointer
+ *
+ * Creates a notifier context for current->mm.
+ */
+static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
+{
+	struct mm_struct *mm = current->mm;
+	struct radeon_mn *rmn;
+	int r;
+
+	if (down_write_killable(&mm->mmap_sem))
+		return ERR_PTR(-EINTR);
+
+	mutex_lock(&rdev->mn_lock);
+
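+	/* one notifier context per mm: reuse an existing entry from the
+	 * hash before allocating a new one
+	 */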
+	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
+		if (rmn->mm == mm)
+			goto release_locks;
+
+	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
+	if (!rmn) {
+		rmn = ERR_PTR(-ENOMEM);
+		goto release_locks;
+	}
+
+	rmn->rdev = rdev;
+	rmn->mm = mm;
+	rmn->mn.ops = &radeon_mn_ops;
+	mutex_init(&rmn->lock);
+	rmn->objects = RB_ROOT_CACHED;
+
+	r = __mmu_notifier_register(&rmn->mn, mm);
+	if (r)
+		goto free_rmn;
+
+	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);
+
+release_locks:
+	mutex_unlock(&rdev->mn_lock);
+	up_write(&mm->mmap_sem);
+
+	return rmn;
+
+free_rmn:
+	mutex_unlock(&rdev->mn_lock);
+	up_write(&mm->mmap_sem);
+	kfree(rmn);
+
+	return ERR_PTR(r);
+}
+
+/**
+ * radeon_mn_register - register a BO for notifier updates
+ *
+ * @bo: radeon buffer object
+ * @addr: userptr addr we should monitor
+ *
+ * Registers an MMU notifier for the given BO at the specified address.
+ * Returns 0 on success, -ERRNO if anything goes wrong.
+ */
+int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
+{
+	unsigned long end = addr + radeon_bo_size(bo) - 1;
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_mn *rmn;
+	struct radeon_mn_node *node = NULL;
+	struct list_head bos;
+	struct interval_tree_node *it;
+
+	rmn = radeon_mn_get(rdev);
+	if (IS_ERR(rmn))
+		return PTR_ERR(rmn);
+
+	INIT_LIST_HEAD(&bos);
+
+	mutex_lock(&rmn->lock);
+
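+	/* merge the new range with every interval it overlaps: remove each
+	 * overlapping node, widen [addr, end] and collect its BOs, so a
+	 * single node covers the union afterwards
+	 */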
+	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+		kfree(node);
+		node = container_of(it, struct radeon_mn_node, it);
+		interval_tree_remove(&node->it, &rmn->objects);
+		addr = min(it->start, addr);
+		end = max(it->last, end);
+		list_splice(&node->bos, &bos);
+	}
+
+	if (!node) {
+		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+		if (!node) {
+			mutex_unlock(&rmn->lock);
+			return -ENOMEM;
+		}
+	}
+
+	bo->mn = rmn;
+
+	node->it.start = addr;
+	node->it.last = end;
+	INIT_LIST_HEAD(&node->bos);
+	list_splice(&bos, &node->bos);
+	list_add(&bo->mn_list, &node->bos);
+
+	interval_tree_insert(&node->it, &rmn->objects);
+
+	mutex_unlock(&rmn->lock);
+
+	return 0;
+}
+
+/**
+ * radeon_mn_unregister - unregister a BO for notifier updates
+ *
+ * @bo: radeon buffer object
+ *
+ * Remove any registration of MMU notifier updates from the buffer object.
+ */
+void radeon_mn_unregister(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_mn *rmn;
+	struct list_head *head;
+
+	mutex_lock(&rdev->mn_lock);
+	rmn = bo->mn;
+	if (rmn == NULL) {
+		mutex_unlock(&rdev->mn_lock);
+		return;
+	}
+
+	mutex_lock(&rmn->lock);
+	/* save the next list entry for later */
+	head = bo->mn_list.next;
+
+	bo->mn = NULL;
+	list_del(&bo->mn_list);
+
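+	/* if bo was the last entry, head now points back at node->bos and
+	 * the interval node itself can go away
+	 */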
+	if (list_empty(head)) {
+		struct radeon_mn_node *node;
+		node = container_of(head, struct radeon_mn_node, bos);
+		interval_tree_remove(&node->it, &rmn->objects);
+		kfree(node);
+	}
+
+	mutex_unlock(&rmn->lock);
+	mutex_unlock(&rdev->mn_lock);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 0000000..fd470d6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,1002 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ *   Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct radeon_bo;
+struct radeon_device;
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
+enum radeon_rmx_type {
+	RMX_OFF,
+	RMX_FULL,
+	RMX_CENTER,
+	RMX_ASPECT
+};
+
+enum radeon_tv_std {
+	TV_STD_NTSC,
+	TV_STD_PAL,
+	TV_STD_PAL_M,
+	TV_STD_PAL_60,
+	TV_STD_NTSC_J,
+	TV_STD_SCART_PAL,
+	TV_STD_SECAM,
+	TV_STD_PAL_CN,
+	TV_STD_PAL_N,
+};
+
+enum radeon_underscan_type {
+	UNDERSCAN_OFF,
+	UNDERSCAN_ON,
+	UNDERSCAN_AUTO,
+};
+
+enum radeon_hpd_id {
+	RADEON_HPD_1 = 0,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+	RADEON_HPD_NONE = 0xff,
+};
+
+enum radeon_output_csc {
+	RADEON_OUTPUT_CSC_BYPASS = 0,
+	RADEON_OUTPUT_CSC_TVRGB = 1,
+	RADEON_OUTPUT_CSC_YCBCR601 = 2,
+	RADEON_OUTPUT_CSC_YCBCR709 = 3,
+};
+
+#define RADEON_MAX_I2C_BUS 16
+
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
+struct radeon_i2c_bus_rec {
+	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* associated hpd pin */
+	enum radeon_hpd_id hpd;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
+	uint32_t mask_clk_reg;
+	uint32_t mask_data_reg;
+	uint32_t a_clk_reg;
+	uint32_t a_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
+	uint32_t mask_clk_mask;
+	uint32_t mask_data_mask;
+	uint32_t a_clk_mask;
+	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
+};
+
+struct radeon_tmds_pll {
+	uint32_t freq;
+	uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+/* pll flags */
+#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
+#define RADEON_PLL_USE_REF_DIV          (1 << 2)
+#define RADEON_PLL_LEGACY               (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
+#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV         (1 << 12)
+#define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
+
+struct radeon_pll {
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
+	uint32_t pll_in_min;
+	uint32_t pll_in_max;
+	uint32_t pll_out_min;
+	uint32_t pll_out_max;
+	uint32_t lcd_pll_out_min;
+	uint32_t lcd_pll_out_max;
+	uint32_t best_vco;
+
+	/* divider limits */
+	uint32_t min_ref_div;
+	uint32_t max_ref_div;
+	uint32_t min_post_div;
+	uint32_t max_post_div;
+	uint32_t min_feedback_div;
+	uint32_t max_feedback_div;
+	uint32_t min_frac_feedback_div;
+	uint32_t max_frac_feedback_div;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
+};
+
+struct radeon_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+	struct radeon_i2c_bus_rec rec;
+	struct drm_dp_aux aux;
+	bool has_aux;
+	struct mutex mutex;
+};
+
+/* mostly for macs, but really any system without connector tables */
+enum radeon_connector_table {
+	CT_NONE = 0,
+	CT_GENERIC,
+	CT_IBOOK,
+	CT_POWERBOOK_EXTERNAL,
+	CT_POWERBOOK_INTERNAL,
+	CT_POWERBOOK_VGA,
+	CT_MINI_EXTERNAL,
+	CT_MINI_INTERNAL,
+	CT_IMAC_G5_ISIGHT,
+	CT_EMAC,
+	CT_RN50_POWER,
+	CT_MAC_X800,
+	CT_MAC_G5_9600,
+	CT_SAM440EP,
+	CT_MAC_G4_SILVER
+};
+
+enum radeon_dvo_chip {
+	DVO_SIL164,
+	DVO_SIL1178,
+};
+
+struct radeon_fbdev;
+
+struct radeon_afmt {
+	bool enabled;
+	int offset;
+	bool last_buffer_filled_status;
+	int id;
+};
+
+struct radeon_mode_info {
+	struct atom_context *atom_context;
+	struct card_info *atom_card_info;
+	enum radeon_connector_table connector_table;
+	bool mode_config_initialized;
+	struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+	struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
+	/* DVI-I properties */
+	struct drm_property *coherent_mode_property;
+	/* DAC enable load detect */
+	struct drm_property *load_detect_property;
+	/* TV standard */
+	struct drm_property *tv_std_property;
+	/* legacy TMDS PLL detect */
+	struct drm_property *tmds_pll_property;
+	/* underscan */
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* audio */
+	struct drm_property *audio_property;
+	/* FMT dithering */
+	struct drm_property *dither_property;
+	/* Output CSC */
+	struct drm_property *output_csc_property;
+	/* hardcoded DFP edid from BIOS */
+	struct edid *bios_hardcoded_edid;
+	int bios_hardcoded_edid_size;
+
+	/* pointer to fbdev info structure */
+	struct radeon_fbdev *rfbdev;
+	/* firmware flags */
+	u16 firmware_flags;
+	/* pointer to backlight encoder */
+	struct radeon_encoder *bl_encoder;
+
+	/* bitmask for active encoder frontends */
+	uint32_t active_encoders;
+};
+
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+	struct radeon_encoder *encoder;
+	uint8_t negative;
+};
+
+#endif
+
+#define MAX_H_CODE_TIMING_LEN 32
+#define MAX_V_CODE_TIMING_LEN 32
+
+/* need to store these as reading
+   back code tables is excessive */
+struct radeon_tv_regs {
+	uint32_t tv_uv_adr;
+	uint32_t timing_cntl;
+	uint32_t hrestart;
+	uint32_t vrestart;
+	uint32_t frestart;
+	uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
+	uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
+};
+
+/* spread spectrum */
+struct radeon_atom_ss {
+	uint16_t percentage;
+	uint16_t percentage_divider;
+	uint8_t type;
+	uint16_t step;
+	uint8_t delay;
+	uint8_t range;
+	uint8_t refdiv;
+	/* asic_ss */
+	uint16_t rate;
+	uint16_t amount;
+};
+
+enum radeon_flip_status {
+	RADEON_FLIP_NONE,
+	RADEON_FLIP_PENDING,
+	RADEON_FLIP_SUBMITTED
+};
+
+struct radeon_crtc {
+	struct drm_crtc base;
+	int crtc_id;
+	u16 lut_r[256], lut_g[256], lut_b[256];
+	bool enabled;
+	bool can_tile;
+	bool cursor_out_of_bounds;
+	uint32_t crtc_offset;
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_x;
+	int cursor_y;
+	int cursor_hot_x;
+	int cursor_hot_y;
+	int cursor_width;
+	int cursor_height;
+	int max_cursor_width;
+	int max_cursor_height;
+	uint32_t legacy_display_base_addr;
+	enum radeon_rmx_type rmx_type;
+	u8 h_border;
+	u8 v_border;
+	fixed20_12 vsc;
+	fixed20_12 hsc;
+	struct drm_display_mode native_mode;
+	int pll_id;
+	/* page flipping */
+	struct workqueue_struct *flip_queue;
+	struct radeon_flip_work *flip_work;
+	enum radeon_flip_status flip_status;
+	/* pll sharing */
+	struct radeon_atom_ss ss;
+	bool ss_enabled;
+	u32 adjusted_clock;
+	int bpc;
+	u32 pll_reference_div;
+	u32 pll_post_div;
+	u32 pll_flags;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	/* for dpm */
+	u32 line_time;
+	u32 wm_low;
+	u32 wm_high;
+	u32 lb_vblank_lead_lines;
+	struct drm_display_mode hw_mode;
+	enum radeon_output_csc output_csc;
+};
+
+struct radeon_encoder_primary_dac {
+	/* legacy primary dac */
+	uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+	/* legacy lvds */
+	uint16_t panel_vcc_delay;
+	uint8_t  panel_pwr_delay;
+	uint8_t  panel_digon_delay;
+	uint8_t  panel_blon_delay;
+	uint16_t panel_ref_divider;
+	uint8_t  panel_post_divider;
+	uint16_t panel_fb_divider;
+	bool     use_bios_dividers;
+	uint32_t lvds_gen_cntl;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int      dpms_mode;
+	uint8_t  backlight_level;
+};
+
+struct radeon_encoder_tv_dac {
+	/* legacy tv dac */
+	uint32_t ps2_tvdac_adj;
+	uint32_t ntsc_tvdac_adj;
+	uint32_t pal_tvdac_adj;
+
+	int               h_pos;
+	int               v_pos;
+	int               h_size;
+	int               supported_tv_stds;
+	bool              tv_on;
+	enum radeon_tv_std tv_std;
+	struct radeon_tv_regs tv;
+};
+
+struct radeon_encoder_int_tmds {
+	/* legacy int tmds */
+	struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_ext_tmds {
+	/* tmds over dvo */
+	struct radeon_i2c_chan *i2c_bus;
+	uint8_t slave_addr;
+	enum radeon_dvo_chip dvo_chip;
+};
+
+struct radeon_encoder_atom_dig {
+	bool linkb;
+	/* atom dig */
+	bool coherent_mode;
+	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+	/* atom lvds/edp */
+	uint32_t lcd_misc;
+	uint16_t panel_pwr_delay;
+	uint32_t lcd_ss_id;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int dpms_mode;
+	uint8_t backlight_level;
+	int panel_mode;
+	struct radeon_afmt *afmt;
+	struct r600_audio_pin *pin;
+	int active_mst_links;
+};
+
+struct radeon_encoder_atom_dac {
+	enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder_mst {
+	int crtc;
+	struct radeon_encoder *primary;
+	struct radeon_connector *connector;
+	struct drm_dp_mst_port *port;
+	int pbn;
+	int fe;
+	bool fe_from_be;
+	bool enc_active;
+};
+
+struct radeon_encoder {
+	struct drm_encoder base;
+	uint32_t encoder_enum;
+	uint32_t encoder_id;
+	uint32_t devices;
+	uint32_t active_device;
+	uint32_t flags;
+	uint32_t pixel_clock;
+	enum radeon_rmx_type rmx_type;
+	enum radeon_underscan_type underscan_type;
+	uint32_t underscan_hborder;
+	uint32_t underscan_vborder;
+	struct drm_display_mode native_mode;
+	void *enc_priv;
+	int audio_polling_active;
+	bool is_ext_encoder;
+	u16 caps;
+	struct radeon_audio_funcs *audio;
+	enum radeon_output_csc output_csc;
+	bool can_mst;
+	uint32_t offset;
+	bool is_mst_encoder;
+	/* front end for this mst encoder */
+};
+
+struct radeon_connector_atom_dig {
+	uint32_t igp_lane_info;
+	/* displayport */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+	bool edp_on;
+	bool is_mst;
+};
+
+struct radeon_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+	u32 shift;
+};
+
+struct radeon_hpd {
+	enum radeon_hpd_id hpd;
+	u8 plugged_state;
+	struct radeon_gpio_rec gpio;
+};
+
+struct radeon_router {
+	u32 router_id;
+	struct radeon_i2c_bus_rec i2c_info;
+	u8 i2c_addr;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
+};
+
+enum radeon_connector_audio {
+	RADEON_AUDIO_DISABLE = 0,
+	RADEON_AUDIO_ENABLE = 1,
+	RADEON_AUDIO_AUTO = 2
+};
+
+enum radeon_connector_dither {
+	RADEON_FMT_DITHER_DISABLE = 0,
+	RADEON_FMT_DITHER_ENABLE = 1,
+};
+
+struct stream_attribs {
+	uint16_t fe;
+	uint16_t slots;
+};
+
+struct radeon_connector {
+	struct drm_connector base;
+	uint32_t connector_id;
+	uint32_t devices;
+	struct radeon_i2c_chan *ddc_bus;
+	/* some systems have an hdmi and vga port with a shared ddc line */
+	bool shared_ddc;
+	bool use_digital;
+	/* we need to keep the EDID between detect and get_modes,
+	   since the analog/digital/tvencoder decision depends on it */
+	struct edid *edid;
+	void *con_priv;
+	bool dac_load_detect;
+	bool detected_by_load; /* if the connection status was determined by load */
+	bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
+	uint16_t connector_object_id;
+	struct radeon_hpd hpd;
+	struct radeon_router router;
+	struct radeon_i2c_chan *router_bus;
+	enum radeon_connector_audio audio;
+	enum radeon_connector_dither dither;
+	int pixelclock_for_modeset;
+	bool is_mst_connector;
+	struct radeon_connector *mst_port;
+	struct drm_dp_mst_port *port;
+	struct drm_dp_mst_topology_mgr mst_mgr;
+
+	struct radeon_encoder *mst_encoder;
+	struct stream_attribs cur_stream_attribs[6];
+	int enabled_attribs;
+};
+
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+				((em) == ATOM_ENCODER_MODE_DP_MST))
+
+struct atom_clock_dividers {
+	u32 post_div;
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 reserved : 6;
+			u32 whole_fb_div : 12;
+			u32 frac_fb_div : 14;
+#else
+			u32 frac_fb_div : 14;
+			u32 whole_fb_div : 12;
+			u32 reserved : 6;
+#endif
+		};
+		u32 fb_div;
+	};
+	u32 ref_div;
+	bool enable_post_div;
+	bool enable_dithen;
+	u32 vco_mode;
+	u32 real_clock;
+	/* added for CI */
+	u32 post_divider;
+	u32 flags;
+};
+
+struct atom_mpll_param {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 reserved : 8;
+			u32 clkfrac : 12;
+			u32 clkf : 12;
+#else
+			u32 clkf : 12;
+			u32 clkfrac : 12;
+			u32 reserved : 8;
+#endif
+		};
+		u32 fb_div;
+	};
+	u32 post_div;
+	u32 bwcntl;
+	u32 dll_speed;
+	u32 vco_mode;
+	u32 yclk_sel;
+	u32 qdr;
+	u32 half_rate;
+};
+
+#define MEM_TYPE_GDDR5  0x50
+#define MEM_TYPE_GDDR4  0x40
+#define MEM_TYPE_GDDR3  0x30
+#define MEM_TYPE_DDR2   0x20
+#define MEM_TYPE_GDDR1  0x10
+#define MEM_TYPE_DDR3   0xb0
+#define MEM_TYPE_MASK   0xf0
+
+struct atom_memory_info {
+	u8 mem_vendor;
+	u8 mem_type;
+};
+
+#define MAX_AC_TIMING_ENTRIES 16
+
+struct atom_memory_clock_range_table {
+	u8 num_entries;
+	u8 rsv[3];
+	u32 mclk[MAX_AC_TIMING_ENTRIES];
+};
+
+#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
+#define VBIOS_MAX_AC_TIMING_ENTRIES 20
+
+struct atom_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct atom_mc_register_address {
+	u16 s1;
+	u8 pre_reg_data;
+};
+
+struct atom_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
+	struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
+};
+
+#define MAX_VOLTAGE_ENTRIES 32
+
+struct atom_voltage_table_entry {
+	u16 value;
+	u32 smio_low;
+};
+
+struct atom_voltage_table {
+	u32 count;
+	u32 mask_low;
+	u32 phase_delay;
+	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
+};
+
+/* Flags for radeon_get_crtc_scanoutpos(), driver internal use only */
+#define DRM_SCANOUTPOS_VALID        (1 << 0)
+#define DRM_SCANOUTPOS_IN_VBLANK    (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
+#define USE_REAL_VBLANKSTART		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
+
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router);
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+						 u16 *vddc, u16 *vddci, u16 *mvdd);
+
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected);
+
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock);
+
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+
+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				       struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+				      const struct drm_display_mode *mode);
+extern void radeon_dp_link_train(struct drm_encoder *encoder,
+				 struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+				    struct drm_connector *connector);
+extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+					 u8 power_state);
+extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
+extern ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg);
+
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override);
+extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+					   int action, uint8_t lane_num,
+					   uint8_t lane_set);
+extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
+					    int action, uint8_t lane_num,
+					    uint8_t lane_set, int fe);
+extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
+						 int fe);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+			   struct radeon_i2c_bus_rec *rec,
+			   const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+						 struct radeon_i2c_bus_rec *i2c_bus);
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+						 struct radeon_i2c_bus_rec *rec,
+						 const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+				u8 slave_addr,
+				u8 addr,
+				u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+				u8 slave_addr,
+				u8 addr,
+				u8 val);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id, u32 clock);
+extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+							  u8 id);
+
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+				      uint64_t freq,
+				      uint32_t *dot_clock_p,
+				      uint32_t *fb_div_p,
+				      uint32_t *frac_fb_div_p,
+				      uint32_t *ref_div_p,
+				      uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     u32 freq,
+				     u32 *dot_clock_p,
+				     u32 *fb_div_p,
+				     u32 *frac_fb_div_p,
+				     u32 *ref_div_p,
+				     u32 *post_div_p);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
+extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+extern bool radeon_encoder_is_digital(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y,
+					 enum mode_set_atomic state);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				 struct drm_framebuffer *old_fb);
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				       struct drm_framebuffer *fb,
+				       int x, int y,
+				       enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int x, int y, int atomic);
+extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+				   struct drm_file *file_priv,
+				   uint32_t handle,
+				   uint32_t width,
+				   uint32_t height,
+				   int32_t hot_x,
+				   int32_t hot_y);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+				   int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
+
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+				      unsigned int flags, int *vpos, int *hpos,
+				      ktime_t *stime, ktime_t *etime,
+				      const struct drm_display_mode *mode);
+
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+					  struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+						     struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+						   struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+							 struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						       struct radeon_encoder_ext_tmds *tmds);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev);
+extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+int radeon_framebuffer_init(struct drm_device *dev,
+			     struct drm_framebuffer *rfb,
+			     const struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			     struct radeon_crtc *radeon_crtc);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode);
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
+
+/* legacy tv */
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid);
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode);
+
+/* fmt blocks */
+void avivo_program_fmt(struct drm_encoder *encoder);
+void dce3_program_fmt(struct drm_encoder *encoder);
+void dce4_program_fmt(struct drm_encoder *encoder);
+void dce8_program_fmt(struct drm_encoder *encoder);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+
+/* mst */
+int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
+int radeon_mst_debugfs_init(struct radeon_device *rdev);
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
+
+void radeon_setup_mst_connector(struct drm_device *dev);
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 0000000..ba2fd29
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,876 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+
+/*
+ * To exclude mutual BO access we rely on bo_reserve exclusion, as all
+ * functions call it.
+ */
+
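+/* adjust the per-domain usage counters, e.g. consumed by
+ * radeon_bo_get_threshold_for_moves() below; sign selects whether the
+ * BO size is added or subtracted
+ */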
+static void radeon_update_memory_usage(struct radeon_bo *bo,
+				       unsigned mem_type, int sign)
+{
+	struct radeon_device *rdev = bo->rdev;
+	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
+
+	switch (mem_type) {
+	case TTM_PL_TT:
+		if (sign > 0)
+			atomic64_add(size, &rdev->gtt_usage);
+		else
+			atomic64_sub(size, &rdev->gtt_usage);
+		break;
+	case TTM_PL_VRAM:
+		if (sign > 0)
+			atomic64_add(size, &rdev->vram_usage);
+		else
+			atomic64_sub(size, &rdev->vram_usage);
+		break;
+	}
+}
+
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct radeon_bo *bo;
+
+	bo = container_of(tbo, struct radeon_bo, tbo);
+
+	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo);
+}
+
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &radeon_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0, i;
+
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM) {
+		/* Try placing BOs which don't need CPU access outside of the
+		 * CPU accessible part of VRAM
+		 */
+		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+			rbo->placements[c].fpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+						     TTM_PL_FLAG_UNCACHED |
+						     TTM_PL_FLAG_VRAM;
+		}
+
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+					     TTM_PL_FLAG_UNCACHED |
+					     TTM_PL_FLAG_VRAM;
+	}
+
+	if (domain & RADEON_GEM_DOMAIN_GTT) {
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
+
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+			   (rbo->rdev->flags & RADEON_IS_AGP)) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
+		} else {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+						     TTM_PL_FLAG_TT;
+		}
+	}
+
+	if (domain & RADEON_GEM_DOMAIN_CPU) {
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
+
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+		    rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
+		} else {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+						     TTM_PL_FLAG_SYSTEM;
+		}
+	}
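+	/* no domain bit matched: fall back to system memory with any
+	 * caching attribute
+	 */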
+	if (!c) {
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+					     TTM_PL_FLAG_SYSTEM;
+	}
+
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+
+	for (i = 0; i < c; ++i) {
+		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
+		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    !rbo->placements[i].fpfn)
+			rbo->placements[i].lpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			rbo->placements[i].lpfn = 0;
+	}
+}
+
+int radeon_bo_create(struct radeon_device *rdev,
+		     unsigned long size, int byte_align, bool kernel,
+		     u32 domain, u32 flags, struct sg_table *sg,
+		     struct reservation_object *resv,
+		     struct radeon_bo **bo_ptr)
+{
+	struct radeon_bo *bo;
+	enum ttm_bo_type type;
+	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	size_t acc_size;
+	int r;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (kernel) {
+		type = ttm_bo_type_kernel;
+	} else if (sg) {
+		type = ttm_bo_type_sg;
+	} else {
+		type = ttm_bo_type_device;
+	}
+	*bo_ptr = NULL;
+
+	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+				       sizeof(struct radeon_bo));
+
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+	bo->rdev = rdev;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->va);
+	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+				       RADEON_GEM_DOMAIN_GTT |
+				       RADEON_GEM_DOMAIN_CPU);
+
+	bo->flags = flags;
+	/* PCI GART is always snooped */
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+#ifdef CONFIG_X86_32
+	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+	 */
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+	/* Don't try to enable write-combining when it can't work, or things
+	 * may be slow
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+	 */
+#ifndef CONFIG_COMPILE_TEST
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+	 thanks to write-combining
+#endif
+
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+	/* For architectures that don't support WC memory,
+	 * mask out the WC flag from the BO
+	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
+	radeon_ttm_placement_from_domain(bo, domain);
+	/* Kernel allocations are uninterruptible */
+	down_read(&rdev->pm.mclk_lock);
+	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, page_align, !kernel, acc_size,
+			sg, resv, &radeon_ttm_bo_destroy);
+	up_read(&rdev->pm.mclk_lock);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	*bo_ptr = bo;
+
+	trace_radeon_bo_create(bo);
+
+	return 0;
+}
+
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr) {
+			*ptr = bo->kptr;
+		}
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r) {
+		return r;
+	}
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr) {
+		*ptr = bo->kptr;
+	}
+	radeon_bo_check_tiling(bo, 0, 0);
+	return 0;
+}
+
+void radeon_bo_kunmap(struct radeon_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+	if (bo == NULL)
+		return NULL;
+
+	ttm_bo_get(&bo->tbo);
+	return bo;
+}
+
+void radeon_bo_unref(struct radeon_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+	struct radeon_device *rdev;
+
+	if ((*bo) == NULL)
+		return;
+	rdev = (*bo)->rdev;
+	tbo = &((*bo)->tbo);
+	ttm_bo_put(tbo);
+	*bo = NULL;
+}
+
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+			     u64 *gpu_addr)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	int r, i;
+
+	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+		return -EPERM;
+
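+	/* already pinned: just bump the pin count and sanity check the
+	 * requested placement against the current offset
+	 */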
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+
+		if (max_offset != 0) {
+			u64 domain_start;
+
+			if (domain == RADEON_GEM_DOMAIN_VRAM)
+				domain_start = bo->rdev->mc.vram_start;
+			else
+				domain_start = bo->rdev->mc.gtt_start;
+			WARN_ON_ONCE(max_offset <
+				     (radeon_bo_gpu_offset(bo) - domain_start));
+		}
+
+		return 0;
+	}
+	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+		return -EINVAL;
+	}
+
+	radeon_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		/* force to pin into visible video ram */
+		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
+			bo->placements[i].lpfn =
+				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
+
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+	}
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+		if (domain == RADEON_GEM_DOMAIN_VRAM)
+			bo->rdev->vram_pin_size += radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size += radeon_bo_size(bo);
+	} else {
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	}
+	return r;
+}
+
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
+
+int radeon_bo_unpin(struct radeon_bo *bo)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (likely(r == 0)) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+	} else {
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+	}
+	return r;
+}
+
+int radeon_bo_evict_vram(struct radeon_device *rdev)
+{
+	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
+	if (0 && (rdev->flags & RADEON_IS_IGP)) {
+		if (rdev->mc.igp_sideport_enabled == false)
+			/* Useless to evict on IGP chips */
+			return 0;
+	}
+	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_bo_force_delete(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects)) {
+		return;
+	}
+	dev_err(rdev->dev, "Userspace still has active objects!\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_put_unlocked(&bo->gem_base);
+	}
+}
+
+int radeon_bo_init(struct radeon_device *rdev)
+{
+	/* reserve PAT memory space to WC for VRAM */
+	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+				   rdev->mc.aper_size);
+
+	/* Add an MTRR for the VRAM */
+	if (!rdev->fastfb_working) {
+		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
+						      rdev->mc.aper_size);
+	}
+	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+		rdev->mc.mc_vram_size >> 20,
+		(unsigned long long)rdev->mc.aper_size >> 20);
+	DRM_INFO("RAM width %dbits %cDR\n",
+			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+	return radeon_ttm_init(rdev);
+}
+
+void radeon_bo_fini(struct radeon_device *rdev)
+{
+	radeon_ttm_fini(rdev);
+	arch_phys_wc_del(rdev->mc.vram_mtrr);
+	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
+}
+
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
+{
+	u64 real_vram_size = rdev->mc.real_vram_size;
+	u64 vram_usage = atomic64_read(&rdev->vram_usage);
+
+	/* This function is based on the current VRAM usage.
+	 *
+	 * - If all of VRAM is free, allow relocating the number of bytes that
+	 *   is equal to 1/4 of the size of VRAM for this IB.
+	 *
+	 * - If more than one half of VRAM is occupied, only allow relocating
+	 *   1 MB of data for this IB.
+	 *
+	 * - From 0 to one half of used VRAM, the threshold decreases
+	 *   linearly.
+	 *         __________________
+	 * 1/4 of -|\               |
+	 * VRAM    | \              |
+	 *         |  \             |
+	 *         |   \            |
+	 *         |    \           |
+	 *         |     \          |
+	 *         |      \         |
+	 *         |       \________|1 MB
+	 *         |----------------|
+	 *    VRAM 0 %             100 %
+	 *         used            used
+	 *
+	 * Note: It's a threshold, not a limit. The threshold must be crossed
+	 * for buffer relocations to stop, so any buffer of an arbitrary size
+	 * can be moved as long as the threshold isn't crossed before
+	 * the relocation takes place. We don't want to disable buffer
+	 * relocations completely.
+	 *
+	 * The idea is that buffers should be placed in VRAM at creation time
+	 * and TTM should only do a minimum number of relocations during
+	 * command submission. In practice, you need to submit at least
+	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
+	 *
+	 * Also, things can get pretty crazy under memory pressure and actual
+	 * VRAM usage can change a lot, so playing safe even at 50% does
+	 * consistently increase performance.
+	 */
+
+	u64 half_vram = real_vram_size >> 1;
+	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+	u64 bytes_moved_threshold = half_free_vram >> 1;
+	return max(bytes_moved_threshold, 1024*1024ull);
+}
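+
+/* Worked example (hypothetical numbers, not from the source): with
+ * real_vram_size = 2 GiB and vram_usage = 512 MiB, half_vram is 1 GiB,
+ * half_free_vram is 512 MiB and the threshold is 256 MiB per IB.  Once
+ * usage reaches 1 GiB (50%), half_free_vram is 0 and the threshold
+ * clamps to the 1 MB floor from the max() above.
+ */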
+
+int radeon_bo_list_validate(struct radeon_device *rdev,
+			    struct ww_acquire_ctx *ticket,
+			    struct list_head *head, int ring)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct radeon_bo_list *lobj;
+	struct list_head duplicates;
+	int r;
+	u64 bytes_moved = 0, initial_bytes_moved;
+	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
+
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+
+	list_for_each_entry(lobj, head, tv.head) {
+		struct radeon_bo *bo = lobj->robj;
+		if (!bo->pin_count) {
+			u32 domain = lobj->preferred_domains;
+			u32 allowed = lobj->allowed_domains;
+			u32 current_domain =
+				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+			/* Check if this buffer will be moved and don't move it
+			 * if we have moved too many buffers for this IB already.
+			 *
+			 * Note that this allows moving at least one buffer of
+			 * any size, because it doesn't take the current "bo"
+			 * into account. We don't want to disallow buffer moves
+			 * completely.
+			 */
+			if ((allowed & current_domain) != 0 &&
+			    (domain & current_domain) == 0 && /* will be moved */
+			    bytes_moved > bytes_moved_threshold) {
+				/* don't move it */
+				domain = current_domain;
+			}
+
+		retry:
+			radeon_ttm_placement_from_domain(bo, domain);
+			if (ring == R600_RING_TYPE_UVD_INDEX)
+				radeon_uvd_force_into_uvd_segment(bo, allowed);
+
+			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
+				       initial_bytes_moved;
+
+			if (unlikely(r)) {
+				if (r != -ERESTARTSYS &&
+				    domain != lobj->allowed_domains) {
+					domain = lobj->allowed_domains;
+					goto retry;
+				}
+				ttm_eu_backoff_reservation(ticket, head);
+				return r;
+			}
+		}
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
+	}
+
+	list_for_each_entry(lobj, &duplicates, tv.head) {
+		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+		lobj->tiling_flags = lobj->robj->tiling_flags;
+	}
+
+	return 0;
+}
+
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+	struct radeon_bo *old_object;
+	int steal;
+	int i;
+
+	lockdep_assert_held(&bo->tbo.resv->lock.base);
+
+	if (!bo->tiling_flags)
+		return 0;
+
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
+		goto out;
+	}
+
+	steal = -1;
+	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+		reg = &rdev->surface_regs[i];
+		if (!reg->bo)
+			break;
+
+		old_object = reg->bo;
+		if (old_object->pin_count == 0)
+			steal = i;
+	}
+
+	/* if we are all out */
+	if (i == RADEON_GEM_MAX_SURFACES) {
+		if (steal == -1)
+			return -ENOMEM;
+		/* find someone with a surface reg and nuke their BO */
+		reg = &rdev->surface_regs[steal];
+		old_object = reg->bo;
+		/* blow away the mapping */
+		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+		ttm_bo_unmap_virtual(&old_object->tbo);
+		old_object->surface_reg = -1;
+		i = steal;
+	}
+
+	bo->surface_reg = i;
+	reg->bo = bo;
+
+out:
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
+	return 0;
+}
+
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+
+	if (bo->surface_reg == -1)
+		return;
+
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
+
+	reg->bo = NULL;
+	bo->surface_reg = -1;
+}
+
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
+{
+	struct radeon_device *rdev = bo->rdev;
+	int r;
+
+	if (rdev->family >= CHIP_CEDAR) {
+		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
+
+		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+		switch (bankw) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (bankh) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (mtaspect) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (tilesplit > 6) {
+			return -EINVAL;
+		}
+		if (stilesplit > 6) {
+			return -EINVAL;
+		}
+	}
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
+}
+
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
+{
+	lockdep_assert_held(&bo->tbo.resv->lock.base);
+
+	if (tiling_flags)
+		*tiling_flags = bo->tiling_flags;
+	if (pitch)
+		*pitch = bo->pitch;
+}
+
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
+{
+	if (!force_drop)
+		lockdep_assert_held(&bo->tbo.resv->lock.base);
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+		return 0;
+
+	if (force_drop) {
+		radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+		if (!has_moved)
+			return 0;
+
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if ((bo->surface_reg >= 0) && !has_moved)
+		return 0;
+
+	return radeon_bo_get_surface_reg(bo);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			   bool evict,
+			   struct ttm_mem_reg *new_mem)
+{
+	struct radeon_bo *rbo;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return;
+
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
+	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+
+	/* update statistics */
+	if (!new_mem)
+		return;
+
+	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
+}
+
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct radeon_device *rdev;
+	struct radeon_bo *rbo;
+	unsigned long offset, size, lpfn;
+	int i, r;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return 0;
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* Can't move a pinned BO to visible VRAM */
+	if (rbo->pin_count > 0)
+		return -EINVAL;
+
+	/* hurrah, the memory is not visible! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	for (i = 0; i < rbo->placement.num_placement; i++) {
+		/* Force into visible VRAM */
+		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+			rbo->placements[i].lpfn = lpfn;
+	}
+	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (unlikely(r != 0)) {
+		return r;
+	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
+	return 0;
+}
+
+int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
+	if (unlikely(r != 0))
+		return r;
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if the fence should be added as a shared fence
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+		     bool shared)
+{
+	struct reservation_object *resv = bo->tbo.resv;
+
+	if (shared)
+		reservation_object_add_shared_fence(resv, &fence->base);
+	else
+		reservation_object_add_excl_fence(resv, &fence->base);
+}
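+
+/* Note on the shared flag (general reservation_object semantics): a shared
+ * fence lets other readers proceed concurrently, while an exclusive fence
+ * must signal before any other access to the buffer is allowed.
+ */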
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 0000000..9ffd821
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns the domain corresponding to the given ttm mem_type.
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return RADEON_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return RADEON_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return RADEON_GEM_DOMAIN_CPU;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_intr:	don't return -ERESTARTSYS on pending signal
+ *
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
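+
+/* Typical usage (illustrative sketch, not part of the original header):
+ * callers bracket access to reservation-protected state with a
+ * reserve/unreserve pair:
+ *
+ *	r = radeon_bo_reserve(bo, false);
+ *	if (unlikely(r != 0))
+ *		return r;
+ *	...access bo->tiling_flags, placement, etc. while reserved...
+ *	radeon_bo_unreserve(bo);
+ *
+ * radeon_bo_set_tiling_flags() in radeon_object.c follows this pattern.
+ */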
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
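+
+/* Example (assuming 4 KiB CPU pages and a 4 KiB RADEON_GPU_PAGE_SIZE,
+ * the common configuration): a BO with tbo.num_pages = 16 is 64 KiB per
+ * radeon_bo_size() and spans 16 GPU pages per radeon_bo_ngpu_pages().
+ */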
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+}
+
+extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+			  bool no_wait);
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain, u32 flags,
+			    struct sg_table *sg,
+			    struct reservation_object *resv,
+			    struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+				    u64 max_offset, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern int radeon_bo_list_validate(struct radeon_device *rdev,
+				   struct ww_acquire_ctx *ticket,
+				   struct list_head *head, int ring);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+				  bool evict,
+				  struct ttm_mem_reg *new_mem);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+			    bool shared);
+
+/*
+ * sub allocation
+ */
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+				     struct radeon_sa_manager *sa_manager,
+				     unsigned size, u32 align, u32 domain,
+				     u32 flags);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+					struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+			    struct radeon_sa_manager *sa_manager,
+			    struct radeon_sa_bo **sa_bo,
+			    unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+			      struct radeon_sa_bo **sa_bo,
+			      struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+					 struct seq_file *m);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 0000000..4b65425
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,1907 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ *          Alex Deucher <alexdeucher@gmail.com>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "avivod.h"
+#include "atom.h"
+#include "r600_dpm.h"
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static const char *radeon_pm_state_type_name[5] = {
+	"",
+	"Powersave",
+	"Battery",
+	"Balanced",
+	"Performance",
+};
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance)
+{
+	int i;
+	int found_instance = -1;
+
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		if (rdev->pm.power_state[i].type == ps_type) {
+			found_instance++;
+			if (found_instance == instance)
+				return i;
+		}
+	}
+	/* return default if no match */
+	return rdev->pm.default_power_state_index;
+}
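+
+/* Illustrative call (hypothetical): selecting the second state of a given
+ * type is done with instance = 1, e.g.
+ *
+ *	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ *
+ * If fewer matching states exist, the default power state index is
+ * returned instead.
+ */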
+
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
+{
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		mutex_lock(&rdev->pm.mutex);
+		if (power_supply_is_system_supplied() > 0)
+			rdev->pm.dpm.ac_power = true;
+		else
+			rdev->pm.dpm.ac_power = false;
+		if (rdev->family == CHIP_ARUBA) {
+			if (rdev->asic->dpm.enable_bapm)
+				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+		}
+		mutex_unlock(&rdev->pm.mutex);
+	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (rdev->pm.profile == PM_PROFILE_AUTO) {
+			mutex_lock(&rdev->pm.mutex);
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+			mutex_unlock(&rdev->pm.mutex);
+		}
+	}
+}
+
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+	switch (rdev->pm.profile) {
+	case PM_PROFILE_DEFAULT:
+		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+		break;
+	case PM_PROFILE_AUTO:
+		if (power_supply_is_system_supplied() > 0) {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		} else {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		}
+		break;
+	case PM_PROFILE_LOW:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+		break;
+	case PM_PROFILE_MID:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		break;
+	case PM_PROFILE_HIGH:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		break;
+	}
+
+	if (rdev->pm.active_crtc_count == 0) {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+	} else {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+	}
+}
+
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects))
+		return;
+
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			ttm_bo_unmap_virtual(&bo->tbo);
+	}
+}
+
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+	if (rdev->pm.active_crtcs) {
+		rdev->pm.vblank_sync = false;
+		wait_event_timeout(
+			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	}
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+	u32 sclk, mclk;
+	bool misc_after = false;
+
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	if (radeon_gui_idle(rdev)) {
+		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			clock_info[rdev->pm.requested_clock_mode_index].sclk;
+		if (sclk > rdev->pm.default_sclk)
+			sclk = rdev->pm.default_sclk;
+
+		/* starting with BTC, there is one state that is used for both
+		 * MH and SH.  The difference is that we always use the high
+		 * clock index for mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+		else
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
+		if (mclk > rdev->pm.default_mclk)
+			mclk = rdev->pm.default_mclk;
+
+		/* upvolt before raising clocks, downvolt after lowering clocks */
+		if (sclk < rdev->pm.current_sclk)
+			misc_after = true;
+
+		radeon_sync_with_vblank(rdev);
+
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			if (!radeon_pm_in_vbl(rdev))
+				return;
+		}
+
+		radeon_pm_prepare(rdev);
+
+		if (!misc_after)
+			/* voltage, pcie lanes, etc. */
+			radeon_pm_misc(rdev);
+
+		/* set engine clock */
+		if (sclk != rdev->pm.current_sclk) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_engine_clock(rdev, sclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_sclk = sclk;
+			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
+		}
+
+		/* set memory clock */
+		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_memory_clock(rdev, mclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_mclk = mclk;
+			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
+		}
+
+		if (misc_after)
+			/* voltage, pcie lanes, etc. */
+			radeon_pm_misc(rdev);
+
+		radeon_pm_finish(rdev);
+
+		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+	} else
+		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
+}
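+
+/* Example of the misc_after ordering above (hypothetical clocks): lowering
+ * sclk from 600 MHz to 300 MHz sets misc_after, so the voltage/pcie-lane
+ * adjustments in radeon_pm_misc() run after the clocks drop; raising sclk
+ * from 300 MHz to 600 MHz leaves misc_after false, so the voltage is
+ * bumped before the clocks go up.
+ */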
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+	struct drm_crtc *crtc;
+	int i, r;
+
+	/* no need to take locks, etc. if nothing's going to change */
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	down_write(&rdev->pm.mclk_lock);
+	mutex_lock(&rdev->ring_lock);
+
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (!ring->ready)
+			continue;
+		r = radeon_fence_wait_empty(rdev, i);
+		if (r) {
+			/* needs a GPU reset; don't reset here */
+			mutex_unlock(&rdev->ring_lock);
+			up_write(&rdev->pm.mclk_lock);
+			return;
+		}
+	}
+
+	radeon_unmap_vram_bos(rdev);
+
+	if (rdev->irq.installed) {
+		i = 0;
+		drm_for_each_crtc(crtc, rdev->ddev) {
+			if (rdev->pm.active_crtcs & (1 << i)) {
+				/* This can fail if a modeset is in progress */
+				if (drm_crtc_vblank_get(crtc) == 0)
+					rdev->pm.req_vblank |= (1 << i);
+				else
+					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
+							 i);
+			}
+			i++;
+		}
+	}
+
+	radeon_set_power_state(rdev);
+
+	if (rdev->irq.installed) {
+		i = 0;
+		drm_for_each_crtc(crtc, rdev->ddev) {
+			if (rdev->pm.req_vblank & (1 << i)) {
+				rdev->pm.req_vblank &= ~(1 << i);
+				drm_crtc_vblank_put(crtc);
+			}
+			i++;
+		}
+	}
+
+	/* update display watermarks based on new power state */
+	radeon_update_bandwidth_info(rdev);
+	if (rdev->pm.active_crtc_count)
+		radeon_bandwidth_update(rdev);
+
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+	mutex_unlock(&rdev->ring_lock);
+	up_write(&rdev->pm.mclk_lock);
+}
+
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+	int i, j;
+	struct radeon_power_state *power_state;
+	struct radeon_pm_clock_info *clock_info;
+
+	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		power_state = &rdev->pm.power_state[i];
+		DRM_DEBUG_DRIVER("State %d: %s\n", i,
+			radeon_pm_state_type_name[power_state->type]);
+		if (i == rdev->pm.default_power_state_index)
+			DRM_DEBUG_DRIVER("\tDefault\n");
+		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+			DRM_DEBUG_DRIVER("\tSingle display only\n");
+		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+		for (j = 0; j < power_state->num_clock_modes; j++) {
+			clock_info = &(power_state->clock_info[j]);
+			if (rdev->flags & RADEON_IS_IGP)
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+						 j,
+						 clock_info->sclk * 10);
+			else
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+						 j,
+						 clock_info->sclk * 10,
+						 clock_info->mclk * 10,
+						 clock_info->voltage.voltage);
+		}
+	}
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	int cp = rdev->pm.profile;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(cp == PM_PROFILE_AUTO) ? "auto" :
+			(cp == PM_PROFILE_LOW) ? "low" :
+			(cp == PM_PROFILE_MID) ? "mid" :
+			(cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+
+	/* Can't set profile when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (strncmp("default", buf, strlen("default")) == 0)
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+		else if (strncmp("auto", buf, strlen("auto")) == 0)
+			rdev->pm.profile = PM_PROFILE_AUTO;
+		else if (strncmp("low", buf, strlen("low")) == 0)
+			rdev->pm.profile = PM_PROFILE_LOW;
+		else if (strncmp("mid", buf, strlen("mid")) == 0)
+			rdev->pm.profile = PM_PROFILE_MID;
+		else if (strncmp("high", buf, strlen("high")) == 0)
+			rdev->pm.profile = PM_PROFILE_HIGH;
+		else {
+			count = -EINVAL;
+			goto fail;
+		}
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else
+		count = -EINVAL;
+
+fail:
+	mutex_unlock(&rdev->pm.mutex);
+
+	return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	int pm = rdev->pm.pm_method;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == PM_METHOD_DYNPM) ? "dynpm" :
+			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+
+	/* Can't set method when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		count = -EINVAL;
+		goto fail;
+	}
+
+	/* we don't support the legacy modes with dpm */
+	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+		count = -EINVAL;
+		goto fail;
+	}
+
+	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.pm_method = PM_METHOD_DYNPM;
+		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+		mutex_unlock(&rdev->pm.mutex);
+	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		/* disable dynpm */
+		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		mutex_unlock(&rdev->pm.mutex);
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+	} else {
+		count = -EINVAL;
+		goto fail;
+	}
+	radeon_pm_compute_clocks(rdev);
+fail:
+	return count;
+}
+
+static ssize_t radeon_get_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+}
+
+static ssize_t radeon_set_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (strncmp("battery", buf, strlen("battery")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
+	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	else if (strncmp("performance", buf, strlen("performance")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
+	else {
+		mutex_unlock(&rdev->pm.mutex);
+		count = -EINVAL;
+		goto fail;
+	}
+	mutex_unlock(&rdev->pm.mutex);
+
+	/* Can't set dpm state when the card is off */
+	if (!(rdev->flags & RADEON_IS_PX) ||
+	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+		radeon_pm_compute_clocks(rdev);
+
+fail:
+	return count;
+}
+
+static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
+						       struct device_attribute *attr,
+						       char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+}
+
+static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
+						       struct device_attribute *attr,
+						       const char *buf,
+						       size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	enum radeon_dpm_forced_level level;
+	int ret = 0;
+
+	/* Can't force performance level when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (strncmp("low", buf, strlen("low")) == 0) {
+		level = RADEON_DPM_FORCED_LEVEL_LOW;
+	} else if (strncmp("high", buf, strlen("high")) == 0) {
+		level = RADEON_DPM_FORCED_LEVEL_HIGH;
+	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
+		level = RADEON_DPM_FORCED_LEVEL_AUTO;
+	} else {
+		count = -EINVAL;
+		goto fail;
+	}
+	if (rdev->asic->dpm.force_performance_level) {
+		if (rdev->pm.dpm.thermal_active) {
+			count = -EINVAL;
+			goto fail;
+		}
+		ret = radeon_dpm_force_performance_level(rdev, level);
+		if (ret)
+			count = -EINVAL;
+	}
+fail:
+	mutex_unlock(&rdev->pm.mutex);
+
+	return count;
+}
+
+static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	u32 pwm_mode = 0;
+
+	if (rdev->asic->dpm.fan_ctrl_get_mode)
+		pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
+
+	/* never 0 (full-speed); always fuse- or smc-controlled */
+	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
+}
+
+static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	int err;
+	int value;
+
+	if (!rdev->asic->dpm.fan_ctrl_set_mode)
+		return -EINVAL;
+
+	err = kstrtoint(buf, 10, &value);
+	if (err)
+		return err;
+
+	switch (value) {
+	case 1: /* manual, percent-based */
+		rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
+		break;
+	default: /* disable */
+		rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
+		break;
+	}
+
+	return count;
+}
+
+static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%i\n", 0);
+}
+
+static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%i\n", 255);
+}
+
+static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	int err;
+	u32 value;
+
+	err = kstrtou32(buf, 10, &value);
+	if (err)
+		return err;
+
+	value = (value * 100) / 255;
+
+	err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	int err;
+	u32 speed;
+
+	err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
+	if (err)
+		return err;
+
+	speed = (speed * 255) / 100;
+
+	return sprintf(buf, "%i\n", speed);
+}
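+
+/* The sysfs pwm1 value follows the hwmon 0-255 convention while the asic
+ * callbacks work in percent, hence the scaling above.  Worked example:
+ * writing 128 programs (128 * 100) / 255 = 50%, and a reported 50% reads
+ * back as (50 * 255) / 100 = 127; one step is lost to integer truncation.
+ */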
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
+static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
+		   radeon_get_dpm_forced_performance_level,
+		   radeon_set_dpm_forced_performance_level);
+
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	struct drm_device *ddev = rdev->ddev;
+	int temp;
+
+	/* Can't get temperature when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	if (rdev->asic->pm.get_temperature)
+		temp = radeon_get_temperature(rdev);
+	else
+		temp = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	int hyst = to_sensor_dev_attr(attr)->index;
+	int temp;
+
+	if (hyst)
+		temp = rdev->pm.dpm.thermal.min_temp;
+	else
+		temp = rdev->pm.dpm.thermal.max_temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
+	NULL
+};
+
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+					struct attribute *attr, int index)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct radeon_device *rdev = dev_get_drvdata(dev);
+	umode_t effective_mode = attr->mode;
+
+	/* Skip attributes if DPM is not enabled */
+	if (rdev->pm.pm_method != PM_METHOD_DPM &&
+	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+		return 0;
+
+	/* Skip fan attributes if fan is not present */
+	if (rdev->pm.no_fan &&
+	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+		return 0;
+
+	/* mask fan attributes if this asic has no bindings to expose them */
+	if ((!rdev->asic->dpm.get_fan_speed_percent &&
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+		effective_mode &= ~S_IRUGO;
+
+	if ((!rdev->asic->dpm.set_fan_speed_percent &&
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+		effective_mode &= ~S_IWUSR;
+
+	/* hide max/min values if we can't both query and manage the fan */
+	if ((!rdev->asic->dpm.set_fan_speed_percent &&
+	     !rdev->asic->dpm.get_fan_speed_percent) &&
+	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+		return 0;
+
+	return effective_mode;
+}
+
+static const struct attribute_group hwmon_attrgroup = {
+	.attrs = hwmon_attributes,
+	.is_visible = hwmon_attributes_visible,
+};
+
+static const struct attribute_group *hwmon_groups[] = {
+	&hwmon_attrgroup,
+	NULL
+};
+
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+	int err = 0;
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV6XX:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_SUMO:
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_CI:
+	case THERMAL_TYPE_KV:
+		if (rdev->asic->pm.get_temperature == NULL)
+			return err;
+		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+									   "radeon", rdev,
+									   hwmon_groups);
+		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+			err = PTR_ERR(rdev->pm.int_hwmon_dev);
+			dev_err(rdev->dev,
+				"Unable to register hwmon device: %d\n", err);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.int_hwmon_dev)
+		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+}
+
+static void radeon_dpm_thermal_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device,
+			     pm.dpm.thermal.work);
+	/* switch to the thermal state */
+	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	if (rdev->asic->pm.get_temperature) {
+		int temp = radeon_get_temperature(rdev);
+
+		if (temp < rdev->pm.dpm.thermal.min_temp)
+			/* switch back to the user state */
+			dpm_state = rdev->pm.dpm.user_state;
+	} else {
+		if (rdev->pm.dpm.thermal.high_to_low)
+			/* switch back to the user state */
+			dpm_state = rdev->pm.dpm.user_state;
+	}
+	mutex_lock(&rdev->pm.mutex);
+	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+		rdev->pm.dpm.thermal_active = true;
+	else
+		rdev->pm.dpm.thermal_active = false;
+	rdev->pm.dpm.state = dpm_state;
+	mutex_unlock(&rdev->pm.mutex);
+
+	radeon_pm_compute_clocks(rdev);
+}
+
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
+{
+	bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;
+
+	/* check if the vblank period is too short to adjust the mclk */
+	if (single_display && rdev->asic->dpm.vblank_too_short) {
+		if (radeon_dpm_vblank_too_short(rdev))
+			single_display = false;
+	}
+
+	/* 120 Hz displays tend to be problematic even if they are under
+	 * the vblank limit.
+	 */
+	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+		single_display = false;
+
+	return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+						     enum radeon_pm_state_type dpm_state)
+{
+	int i;
+	struct radeon_ps *ps;
+	u32 ui_class;
+	bool single_display = radeon_dpm_single_display(rdev);
+
+	/* certain older asics have a separate 3D performance state,
+	 * so try that first if the user selected performance
+	 */
+	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+	/* balanced states don't exist at the moment */
+	if (dpm_state == POWER_STATE_TYPE_BALANCED)
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+	/* Pick the best power state based on current conditions */
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		ps = &rdev->pm.dpm.ps[i];
+		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+		switch (dpm_state) {
+		/* user states */
+		case POWER_STATE_TYPE_BATTERY:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_BALANCED:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_PERFORMANCE:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		/* internal states */
+		case POWER_STATE_TYPE_INTERNAL_UVD:
+			if (rdev->pm.dpm.uvd_ps)
+				return rdev->pm.dpm.uvd_ps;
+			else
+				break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_BOOT:
+			return rdev->pm.dpm.boot_ps;
+		case POWER_STATE_TYPE_INTERNAL_THERMAL:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ACPI:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ULV:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_3DPERF:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+				return ps;
+			break;
+		default:
+			break;
+		}
+	}
+	/* use a fallback state if we didn't match */
+	switch (dpm_state) {
+	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+		if (rdev->pm.dpm.uvd_ps) {
+			return rdev->pm.dpm.uvd_ps;
+		} else {
+			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+			goto restart_search;
+		}
+	case POWER_STATE_TYPE_INTERNAL_THERMAL:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_ACPI:
+		dpm_state = POWER_STATE_TYPE_BATTERY;
+		goto restart_search;
+	case POWER_STATE_TYPE_BATTERY:
+	case POWER_STATE_TYPE_BALANCED:
+	case POWER_STATE_TYPE_INTERNAL_3DPERF:
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		goto restart_search;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+{
+	int i;
+	struct radeon_ps *ps;
+	enum radeon_pm_state_type dpm_state;
+	int ret;
+	bool single_display = radeon_dpm_single_display(rdev);
+
+	/* nothing to do if dpm init failed */
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
+		/* add other state override checks here */
+		if ((!rdev->pm.dpm.thermal_active) &&
+		    (!rdev->pm.dpm.uvd_active))
+			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
+	}
+	dpm_state = rdev->pm.dpm.state;
+
+	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
+	if (ps)
+		rdev->pm.dpm.requested_ps = ps;
+	else
+		return;
+
+	/* no need to reprogram if nothing changed unless we are on BTC+ */
+	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
+		/* vce just modifies an existing state so force a change */
+		if (ps->vce_active != rdev->pm.dpm.vce_active)
+			goto force;
+		/* user has made a display change (such as timing) */
+		if (rdev->pm.dpm.single_display != single_display)
+			goto force;
+		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
+			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
+			 * all we need to do is update the display configuration.
+			 */
+			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
+				/* update display watermarks based on new power state */
+				radeon_bandwidth_update(rdev);
+				/* update displays */
+				radeon_dpm_display_configuration_changed(rdev);
+				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+			}
+			return;
+		} else {
+			/* for BTC+ if the num crtcs hasn't changed and state is the same,
+			 * nothing to do, if the num crtcs is > 1 and state is the same,
+			 * update display configuration.
+			 */
+			if (rdev->pm.dpm.new_active_crtcs ==
+			    rdev->pm.dpm.current_active_crtcs) {
+				return;
+			} else {
+				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
+				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
+					/* update display watermarks based on new power state */
+					radeon_bandwidth_update(rdev);
+					/* update displays */
+					radeon_dpm_display_configuration_changed(rdev);
+					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+					return;
+				}
+			}
+		}
+	}
+
+force:
+	if (radeon_dpm == 1) {
+		printk("switching from power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+		printk("switching to power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+	}
+
+	down_write(&rdev->pm.mclk_lock);
+	mutex_lock(&rdev->ring_lock);
+
+	/* update whether vce is active */
+	ps->vce_active = rdev->pm.dpm.vce_active;
+
+	ret = radeon_dpm_pre_set_power_state(rdev);
+	if (ret)
+		goto done;
+
+	/* update display watermarks based on new power state */
+	radeon_bandwidth_update(rdev);
+	/* update displays */
+	radeon_dpm_display_configuration_changed(rdev);
+
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (ring->ready)
+			radeon_fence_wait_empty(rdev, i);
+	}
+
+	/* program the new power state */
+	radeon_dpm_set_power_state(rdev);
+
+	/* update current power state */
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
+
+	radeon_dpm_post_set_power_state(rdev);
+
+	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
+
+	if (rdev->asic->dpm.force_performance_level) {
+		if (rdev->pm.dpm.thermal_active) {
+			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+			/* force low perf level for thermal */
+			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+			/* save the user's level */
+			rdev->pm.dpm.forced_level = level;
+		} else {
+			/* otherwise, user selected level */
+			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
+		}
+	}
+
+done:
+	mutex_unlock(&rdev->ring_lock);
+	up_write(&rdev->pm.mclk_lock);
+}
+
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
+{
+	enum radeon_pm_state_type dpm_state;
+
+	if (rdev->asic->dpm.powergate_uvd) {
+		mutex_lock(&rdev->pm.mutex);
+		/* don't powergate anything if we
+		   have active but paused streams */
+		enable |= rdev->pm.dpm.sd > 0;
+		enable |= rdev->pm.dpm.hd > 0;
+		/* enable/disable UVD */
+		radeon_dpm_powergate_uvd(rdev, !enable);
+		mutex_unlock(&rdev->pm.mutex);
+	} else {
+		if (enable) {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = true;
+			/* disable this for now */
+#if 0
+			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+			else
+#endif
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+			rdev->pm.dpm.state = dpm_state;
+			mutex_unlock(&rdev->pm.mutex);
+		} else {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = false;
+			mutex_unlock(&rdev->pm.mutex);
+		}
+
+		radeon_pm_compute_clocks(rdev);
+	}
+}
+
+void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
+{
+	if (enable) {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.dpm.vce_active = true;
+		/* XXX select vce level based on ring/task */
+		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
+		mutex_unlock(&rdev->pm.mutex);
+	} else {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.dpm.vce_active = false;
+		mutex_unlock(&rdev->pm.mutex);
+	}
+
+	radeon_pm_compute_clocks(rdev);
+}
+
+static void radeon_pm_suspend_old(struct radeon_device *rdev)
+{
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+	}
+	mutex_unlock(&rdev->pm.mutex);
+
+	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+}
+
+static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
+{
+	mutex_lock(&rdev->pm.mutex);
+	/* disable dpm */
+	radeon_dpm_disable(rdev);
+	/* reset the power state */
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	rdev->pm.dpm_enabled = false;
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_suspend_dpm(rdev);
+	else
+		radeon_pm_suspend_old(rdev);
+}
+
+static void radeon_pm_resume_old(struct radeon_device *rdev)
+{
+	/* set up the default clocks if the MC ucode is loaded */
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+	/* asic init will reset the default power state */
+	mutex_lock(&rdev->pm.mutex);
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	rdev->pm.current_sclk = rdev->pm.default_sclk;
+	rdev->pm.current_mclk = rdev->pm.default_mclk;
+	if (rdev->pm.power_state) {
+		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	}
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM &&
+	    rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
+	mutex_unlock(&rdev->pm.mutex);
+	radeon_pm_compute_clocks(rdev);
+}
+
+static void radeon_pm_resume_dpm(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* asic init will reset to the boot state */
+	mutex_lock(&rdev->pm.mutex);
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	radeon_dpm_setup_asic(rdev);
+	ret = radeon_dpm_enable(rdev);
+	mutex_unlock(&rdev->pm.mutex);
+	if (ret)
+		goto dpm_resume_fail;
+	rdev->pm.dpm_enabled = true;
+	return;
+
+dpm_resume_fail:
+	DRM_ERROR("radeon: dpm resume failed\n");
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume_dpm(rdev);
+	else
+		radeon_pm_resume_old(rdev);
+}
+
+static int radeon_pm_init_old(struct radeon_device *rdev)
+{
+	int ret;
+
+	rdev->pm.profile = PM_PROFILE_DEFAULT;
+	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios)
+			radeon_atombios_get_power_modes(rdev);
+		else
+			radeon_combios_get_power_modes(rdev);
+		radeon_pm_print_states(rdev);
+		radeon_pm_init_profile(rdev);
+		/* set up the default clocks if the MC ucode is loaded */
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+							SET_VOLTAGE_TYPE_ASIC_VDDC);
+			if (rdev->pm.default_vddci)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+							SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
+	}
+
+	/* set up the internal thermal sensor if applicable */
+	ret = radeon_hwmon_init(rdev);
+	if (ret)
+		return ret;
+
+	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
+	if (rdev->pm.num_power_states > 1) {
+		if (radeon_debugfs_pm_init(rdev)) {
+			DRM_ERROR("Failed to register debugfs file for PM!\n");
+		}
+
+		DRM_INFO("radeon: power management initialized\n");
+	}
+
+	return 0;
+}
+
+static void radeon_dpm_print_power_states(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		printk("== power state %d ==\n", i);
+		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
+	}
+}
+
+static int radeon_pm_init_dpm(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* default to balanced state */
+	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
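+	/* dpm requires the power state tables from an ATOM BIOS */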
+	if (rdev->bios && rdev->is_atom_bios)
+		radeon_atombios_get_power_modes(rdev);
+	else
+		return -EINVAL;
+
+	/* set up the internal thermal sensor if applicable */
+	ret = radeon_hwmon_init(rdev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
+	mutex_lock(&rdev->pm.mutex);
+	radeon_dpm_init(rdev);
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	if (radeon_dpm == 1)
+		radeon_dpm_print_power_states(rdev);
+	radeon_dpm_setup_asic(rdev);
+	ret = radeon_dpm_enable(rdev);
+	mutex_unlock(&rdev->pm.mutex);
+	if (ret)
+		goto dpm_failed;
+	rdev->pm.dpm_enabled = true;
+
+	if (radeon_debugfs_pm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for dpm!\n");
+	}
+
+	DRM_INFO("radeon: dpm initialized\n");
+
+	return 0;
+
+dpm_failed:
+	rdev->pm.dpm_enabled = false;
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+	DRM_ERROR("radeon: dpm initialization failed\n");
+	return ret;
+}
+
+struct radeon_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+	{ 0, 0, 0, 0 },
+};
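+/* New quirk entries go before the zero terminator and are matched on PCI
+ * vendor, device, subsystem vendor and subsystem device; e.g. a
+ * (hypothetical) entry { PCI_VENDOR_ID_ATI, 0x6819, 0x1043, 0x042c } would
+ * disable dpm for that one board only.
+ */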
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+	bool disable_dpm = false;
+
+	/* Apply dpm quirks */
+	while (p && p->chip_device != 0) {
+		if (rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			disable_dpm = true;
+			break;
+		}
+		++p;
+	}
+
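+	/* radeon_dpm module parameter: 1 forces dpm on where supported, 0
+	 * forces the profile method, -1 (the default) lets the per-family
+	 * logic below decide, e.g. booting with radeon.dpm=1.
+	 */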
+	/* enable dpm on rv6xx+ */
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RV670:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 1)
+			rdev->pm.pm_method = PM_METHOD_DPM;
+		else
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		break;
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+	case CHIP_CAYMAN:
+	case CHIP_ARUBA:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
+	case CHIP_MULLINS:
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (disable_dpm && (radeon_dpm == -1))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 0)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else
+			rdev->pm.pm_method = PM_METHOD_DPM;
+		break;
+	default:
+		/* default to profile method */
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		break;
+	}
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		return radeon_pm_init_dpm(rdev);
+	else
+		return radeon_pm_init_old(rdev);
+}
+
+int radeon_pm_late_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+		if (rdev->pm.dpm_enabled) {
+			if (!rdev->pm.sysfs_initialized) {
+				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+				if (ret)
+					DRM_ERROR("failed to create device file for dpm state\n");
+				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+				if (ret)
+					DRM_ERROR("failed to create device file for dpm force performance level\n");
+				/* XXX: these are noops for dpm but are here for backwards compat */
+				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+				if (ret)
+					DRM_ERROR("failed to create device file for power profile\n");
+				ret = device_create_file(rdev->dev, &dev_attr_power_method);
+				if (ret)
+					DRM_ERROR("failed to create device file for power method\n");
+				rdev->pm.sysfs_initialized = true;
+			}
+
+			mutex_lock(&rdev->pm.mutex);
+			ret = radeon_dpm_late_enable(rdev);
+			mutex_unlock(&rdev->pm.mutex);
+			if (ret) {
+				rdev->pm.dpm_enabled = false;
+				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+			} else {
+				/* set the dpm state for PX since there won't be
+				 * a modeset to call this.
+				 */
+				radeon_pm_compute_clocks(rdev);
+			}
+		}
+	} else {
+		if ((rdev->pm.num_power_states > 1) &&
+		    (!rdev->pm.sysfs_initialized)) {
+			/* where's the best place to put these? */
+			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+			if (ret)
+				DRM_ERROR("failed to create device file for power profile\n");
+			ret = device_create_file(rdev->dev, &dev_attr_power_method);
+			if (ret)
+				DRM_ERROR("failed to create device file for power method\n");
+			if (!ret)
+				rdev->pm.sysfs_initialized = true;
+		}
+	}
+	return ret;
+}
+
+static void radeon_pm_fini_old(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states > 1) {
+		mutex_lock(&rdev->pm.mutex);
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			/* reset default clocks */
+			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+			radeon_pm_set_clocks(rdev);
+		}
+		mutex_unlock(&rdev->pm.mutex);
+
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+	}
+
+	radeon_hwmon_fini(rdev);
+	kfree(rdev->pm.power_state);
+}
+
+static void radeon_pm_fini_dpm(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states > 1) {
+		mutex_lock(&rdev->pm.mutex);
+		radeon_dpm_disable(rdev);
+		mutex_unlock(&rdev->pm.mutex);
+
+		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
+		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+		/* XXX backwards compat */
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+	}
+	radeon_dpm_fini(rdev);
+
+	radeon_hwmon_fini(rdev);
+	kfree(rdev->pm.power_state);
+}
+
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_fini_dpm(rdev);
+	else
+		radeon_pm_fini_old(rdev);
+}
+
+static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+
+	if (rdev->pm.num_power_states < 2)
+		return;
+
+	mutex_lock(&rdev->pm.mutex);
+
+	rdev->pm.active_crtcs = 0;
+	rdev->pm.active_crtc_count = 0;
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (radeon_crtc->enabled) {
+				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+				rdev->pm.active_crtc_count++;
+			}
+		}
+	}
+
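+	/* profile mode simply reapplies the active profile; dynpm is paused
+	 * when more than one crtc is active (reclocking is synchronized to a
+	 * single crtc's vblank), runs the idle worker with exactly one, and
+	 * drops to minimum clocks with none.
+	 */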
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+			if (rdev->pm.active_crtc_count > 1) {
+				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
+				}
+			} else if (rdev->pm.active_crtc_count == 1) {
+				/* TODO: Increase clocks if needed for current mode */
+
+				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
+				}
+			} else { /* count == 0 */
+				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+				}
+			}
+		}
+	}
+
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	mutex_lock(&rdev->pm.mutex);
+
+	/* update active crtc counts */
+	rdev->pm.dpm.new_active_crtcs = 0;
+	rdev->pm.dpm.new_active_crtc_count = 0;
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (crtc->enabled) {
+				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+				rdev->pm.dpm.new_active_crtc_count++;
+			}
+		}
+	}
+
+	/* update battery/ac status */
+	if (power_supply_is_system_supplied() > 0)
+		rdev->pm.dpm.ac_power = true;
+	else
+		rdev->pm.dpm.ac_power = false;
+
+	radeon_dpm_change_power_state_locked(rdev);
+
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_compute_clocks_dpm(rdev);
+	else
+		radeon_pm_compute_clocks_old(rdev);
+}
+
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
+{
+	int  crtc, vpos, hpos, vbl_status;
+	bool in_vbl = true;
+
+	/* Iterate over all active CRTCs. All CRTCs must be in vblank,
+	 * otherwise return in_vbl == false.
+	 */
+	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+		if (rdev->pm.active_crtcs & (1 << crtc)) {
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+								crtc,
+								USE_REAL_VBLANKSTART,
+								&vpos, &hpos, NULL, NULL,
+								&rdev->mode_info.crtcs[crtc]->base.hwmode);
+			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
+				in_vbl = false;
+		}
+	}
+
+	return in_vbl;
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+	u32 stat_crtc = 0;
+	bool in_vbl = radeon_pm_in_vbl(rdev);
+
+	if (!in_vbl)
+		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
+			 finish ? "exit" : "entry");
+	return in_vbl;
+}
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev;
+	int resched;
+	rdev = container_of(work, struct radeon_device,
+				pm.dynpm_idle_work.work);
+
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+		int not_processed = 0;
+		int i;
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			struct radeon_ring *ring = &rdev->ring[i];
+
+			if (ring->ready) {
+				not_processed += radeon_fence_count_emitted(rdev, i);
+				if (not_processed >= 3)
+					break;
+			}
+		}
+
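+		/* heuristic: three or more fences still outstanding across
+		 * the rings means the GPU is busy and worth upclocking; zero
+		 * means it is idle and can be downclocked.
+		 */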
+		if (not_processed >= 3) { /* should upclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_upclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_UPCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		} else if (not_processed == 0) { /* should downclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_downclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_DOWNCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		}
+
+		/* Note, radeon_pm_set_clocks is called with static_switch set
+		 * to false since we want to wait for vbl to avoid flicker.
+		 */
+		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+		    jiffies > rdev->pm.dynpm_action_timeout) {
+			radeon_pm_get_dynpm_state(rdev);
+			radeon_pm_set_clocks(rdev);
+		}
+
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
+	mutex_unlock(&rdev->pm.mutex);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_device *ddev = rdev->ddev;
+
+	if ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		seq_printf(m, "PX asic powered off\n");
+	} else if (rdev->pm.dpm_enabled) {
+		mutex_lock(&rdev->pm.mutex);
+		if (rdev->asic->dpm.debugfs_print_current_performance_level)
+			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
+		else
+			seq_printf(m, "Debugfs support not implemented for this asic\n");
+		mutex_unlock(&rdev->pm.mutex);
+	} else {
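+		/* clocks are stored in units of 10 kHz, hence the literal
+		 * '0' appended to each printed value */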
+		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+		else
+			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+		if (rdev->asic->pm.get_memory_clock)
+			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+		if (rdev->pm.current_vddc)
+			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+		if (rdev->asic->pm.get_pcie_lanes)
+			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+	}
+
+	return 0;
+}
+
+static struct drm_info_list radeon_pm_info_list[] = {
+	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+	return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 0000000..7110d40
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+#include <linux/dma-buf.h>
+
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int npages = bo->tbo.num_pages;
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return bo->dma_buf_vmap.virtual;
+}
+
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+	ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+							struct dma_buf_attachment *attach,
+							struct sg_table *sg)
+{
+	struct reservation_object *resv = attach->dmabuf->resv;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_bo *bo;
+	int ret;
+
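+	/* Create the GTT BO against the exporter's reservation object so the
+	 * new BO shares the dma-buf's fencing state; the reservation lock
+	 * must be held while a BO is created with a shared resv, hence the
+	 * explicit ww_mutex_lock/unlock pair.
+	 */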
+	ww_mutex_lock(&resv->lock, NULL);
+	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
+			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+	ww_mutex_unlock(&resv->lock);
+	if (ret)
+		return ERR_PTR(ret);
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&bo->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
+	bo->prime_shared_count = 1;
+	return &bo->gem_base;
+}
+
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret = 0;
+
+	ret = radeon_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+	if (likely(ret == 0))
+		bo->prime_shared_count++;
+
+	radeon_bo_unreserve(bo);
+	return ret;
+}
+
+void radeon_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret = 0;
+
+	ret = radeon_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return;
+
+	radeon_bo_unpin(bo);
+	if (bo->prime_shared_count)
+		bo->prime_shared_count--;
+	radeon_bo_unreserve(bo);
+}
+
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+	return bo->tbo.resv;
+}
+
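+/* Userptr BOs are backed by pages of a user process' address space, so
+ * refuse to export them via PRIME.
+ */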
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(gobj);
+
+	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+		return ERR_PTR(-EPERM);
+	return drm_gem_prime_export(dev, gobj, flags);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 0000000..62d5497
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3725 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ *   Kevin E. Martin <martin@xfree86.org>
+ *   Rickard E. Faith <faith@valinux.com>
+ *   Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ *   Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ *   1999.
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ *   SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!!  NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON.  A FULL AUDIT OF THIS CODE IS NEEDED!  */
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+#include "evergreen_reg.h"
+#include "ni_reg.h"
+#include "si_reg.h"
+#include "cik_reg.h"
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define		RADEON_MC_AGP_START_MASK	0x0000FFFF
+#define		RADEON_MC_AGP_START_SHIFT	0
+#define		RADEON_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_AGP_TOP_SHIFT		16
+#define RADEON_MC_FB_LOCATION		0x0148
+#define		RADEON_MC_FB_START_MASK		0x0000FFFF
+#define		RADEON_MC_FB_START_SHIFT	0
+#define		RADEON_MC_FB_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_FB_TOP_SHIFT		16
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RADEON_AGP_BASE			0x0170
+
+#define ATI_DATATYPE_VQ				0
+#define ATI_DATATYPE_CI4			1
+#define ATI_DATATYPE_CI8			2
+#define ATI_DATATYPE_ARGB1555			3
+#define ATI_DATATYPE_RGB565			4
+#define ATI_DATATYPE_RGB888			5
+#define ATI_DATATYPE_ARGB8888			6
+#define ATI_DATATYPE_RGB332			7
+#define ATI_DATATYPE_Y8				8
+#define ATI_DATATYPE_RGB8			9
+#define ATI_DATATYPE_CI16			10
+#define ATI_DATATYPE_VYUY_422			11
+#define ATI_DATATYPE_YVYU_422			12
+#define ATI_DATATYPE_AYUV_444			14
+#define ATI_DATATYPE_ARGB4444			15
+
+/* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID                   0x0f2c /* PCI */
+#define RADEON_AGP_BASE                     0x0170
+#define RADEON_AGP_CNTL                     0x0174
+#       define RADEON_AGP_APER_SIZE_256MB   (0x00 << 0)
+#       define RADEON_AGP_APER_SIZE_128MB   (0x20 << 0)
+#       define RADEON_AGP_APER_SIZE_64MB    (0x30 << 0)
+#       define RADEON_AGP_APER_SIZE_32MB    (0x38 << 0)
+#       define RADEON_AGP_APER_SIZE_16MB    (0x3c << 0)
+#       define RADEON_AGP_APER_SIZE_8MB     (0x3e << 0)
+#       define RADEON_AGP_APER_SIZE_4MB     (0x3f << 0)
+#       define RADEON_AGP_APER_SIZE_MASK    (0x3f << 0)
+#define RADEON_STATUS_PCI_CONFIG            0x06
+#       define RADEON_CAP_LIST              0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG  0x34 /* offset in PCI config*/
+#       define RADEON_CAP_PTR_MASK          0xfc /* mask off reserved bits of CAP_PTR */
+#       define RADEON_CAP_ID_NULL           0x00 /* End of capability list */
+#       define RADEON_CAP_ID_AGP            0x02 /* AGP capability ID */
+#       define RADEON_CAP_ID_EXP            0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND                  0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG       0x0060 /* offset in PCI config*/
+#       define RADEON_AGP_ENABLE            (1<<8)
+#define RADEON_AGP_PLL_CNTL                 0x000b /* PLL */
+#define RADEON_AGP_STATUS                   0x0f5c /* PCI */
+#       define RADEON_AGP_1X_MODE           0x01
+#       define RADEON_AGP_2X_MODE           0x02
+#       define RADEON_AGP_4X_MODE           0x04
+#       define RADEON_AGP_FW_MODE           0x10
+#       define RADEON_AGP_MODE_MASK         0x17
+#       define RADEON_AGPv3_MODE            0x08
+#       define RADEON_AGPv3_4X_MODE         0x01
+#       define RADEON_AGPv3_8X_MODE         0x02
+#define RADEON_ATTRDR                       0x03c1 /* VGA */
+#define RADEON_ATTRDW                       0x03c0 /* VGA */
+#define RADEON_ATTRX                        0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL                  0x1660
+#       define RADEON_AUX1_SC_EN            (1 << 0)
+#       define RADEON_AUX1_SC_MODE_OR       (0 << 1)
+#       define RADEON_AUX1_SC_MODE_NAND     (1 << 1)
+#       define RADEON_AUX2_SC_EN            (1 << 2)
+#       define RADEON_AUX2_SC_MODE_OR       (0 << 3)
+#       define RADEON_AUX2_SC_MODE_NAND     (1 << 3)
+#       define RADEON_AUX3_SC_EN            (1 << 4)
+#       define RADEON_AUX3_SC_MODE_OR       (0 << 5)
+#       define RADEON_AUX3_SC_MODE_NAND     (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM               0x1670
+#define RADEON_AUX1_SC_LEFT                 0x1664
+#define RADEON_AUX1_SC_RIGHT                0x1668
+#define RADEON_AUX1_SC_TOP                  0x166c
+#define RADEON_AUX2_SC_BOTTOM               0x1680
+#define RADEON_AUX2_SC_LEFT                 0x1674
+#define RADEON_AUX2_SC_RIGHT                0x1678
+#define RADEON_AUX2_SC_TOP                  0x167c
+#define RADEON_AUX3_SC_BOTTOM               0x1690
+#define RADEON_AUX3_SC_LEFT                 0x1684
+#define RADEON_AUX3_SC_RIGHT                0x1688
+#define RADEON_AUX3_SC_TOP                  0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL         0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL         0x02dc
+
+#define RADEON_BASE_CODE                    0x0f0b
+#define RADEON_BIOS_0_SCRATCH               0x0010
+#       define RADEON_FP_PANEL_SCALABLE     (1 << 16)
+#       define RADEON_FP_PANEL_SCALE_EN     (1 << 17)
+#       define RADEON_FP_CHIP_SCALE_EN      (1 << 18)
+#       define RADEON_DRIVER_BRIGHTNESS_EN  (1 << 26)
+#       define RADEON_DISPLAY_ROT_MASK      (3 << 28)
+#       define RADEON_DISPLAY_ROT_00        (0 << 28)
+#       define RADEON_DISPLAY_ROT_90        (1 << 28)
+#       define RADEON_DISPLAY_ROT_180       (2 << 28)
+#       define RADEON_DISPLAY_ROT_270       (3 << 28)
+#define RADEON_BIOS_1_SCRATCH               0x0014
+#define RADEON_BIOS_2_SCRATCH               0x0018
+#define RADEON_BIOS_3_SCRATCH               0x001c
+#define RADEON_BIOS_4_SCRATCH               0x0020
+#       define RADEON_CRT1_ATTACHED_MASK    (3 << 0)
+#       define RADEON_CRT1_ATTACHED_MONO    (1 << 0)
+#       define RADEON_CRT1_ATTACHED_COLOR   (2 << 0)
+#       define RADEON_LCD1_ATTACHED         (1 << 2)
+#       define RADEON_DFP1_ATTACHED         (1 << 3)
+#       define RADEON_TV1_ATTACHED_MASK     (3 << 4)
+#       define RADEON_TV1_ATTACHED_COMP     (1 << 4)
+#       define RADEON_TV1_ATTACHED_SVIDEO   (2 << 4)
+#       define RADEON_CRT2_ATTACHED_MASK    (3 << 8)
+#       define RADEON_CRT2_ATTACHED_MONO    (1 << 8)
+#       define RADEON_CRT2_ATTACHED_COLOR   (2 << 8)
+#       define RADEON_DFP2_ATTACHED         (1 << 11)
+#define RADEON_BIOS_5_SCRATCH               0x0024
+#       define RADEON_LCD1_ON               (1 << 0)
+#       define RADEON_CRT1_ON               (1 << 1)
+#       define RADEON_TV1_ON                (1 << 2)
+#       define RADEON_DFP1_ON               (1 << 3)
+#       define RADEON_CRT2_ON               (1 << 5)
+#       define RADEON_CV1_ON                (1 << 6)
+#       define RADEON_DFP2_ON               (1 << 7)
+#       define RADEON_LCD1_CRTC_MASK        (1 << 8)
+#       define RADEON_LCD1_CRTC_SHIFT       8
+#       define RADEON_CRT1_CRTC_MASK        (1 << 9)
+#       define RADEON_CRT1_CRTC_SHIFT       9
+#       define RADEON_TV1_CRTC_MASK         (1 << 10)
+#       define RADEON_TV1_CRTC_SHIFT        10
+#       define RADEON_DFP1_CRTC_MASK        (1 << 11)
+#       define RADEON_DFP1_CRTC_SHIFT       11
+#       define RADEON_CRT2_CRTC_MASK        (1 << 12)
+#       define RADEON_CRT2_CRTC_SHIFT       12
+#       define RADEON_CV1_CRTC_MASK         (1 << 13)
+#       define RADEON_CV1_CRTC_SHIFT        13
+#       define RADEON_DFP2_CRTC_MASK        (1 << 14)
+#       define RADEON_DFP2_CRTC_SHIFT       14
+#       define RADEON_ACC_REQ_LCD1          (1 << 16)
+#       define RADEON_ACC_REQ_CRT1          (1 << 17)
+#       define RADEON_ACC_REQ_TV1           (1 << 18)
+#       define RADEON_ACC_REQ_DFP1          (1 << 19)
+#       define RADEON_ACC_REQ_CRT2          (1 << 21)
+#       define RADEON_ACC_REQ_TV2           (1 << 22)
+#       define RADEON_ACC_REQ_DFP2          (1 << 23)
+#define RADEON_BIOS_6_SCRATCH               0x0028
+#       define RADEON_ACC_MODE_CHANGE       (1 << 2)
+#       define RADEON_EXT_DESKTOP_MODE      (1 << 3)
+#       define RADEON_LCD_DPMS_ON           (1 << 20)
+#       define RADEON_CRT_DPMS_ON           (1 << 21)
+#       define RADEON_TV_DPMS_ON            (1 << 22)
+#       define RADEON_DFP_DPMS_ON           (1 << 23)
+#       define RADEON_DPMS_MASK             (3 << 24)
+#       define RADEON_DPMS_ON               (0 << 24)
+#       define RADEON_DPMS_STANDBY          (1 << 24)
+#       define RADEON_DPMS_SUSPEND          (2 << 24)
+#       define RADEON_DPMS_OFF              (3 << 24)
+#       define RADEON_SCREEN_BLANKING       (1 << 26)
+#       define RADEON_DRIVER_CRITICAL       (1 << 27)
+#       define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH               0x002c
+#       define RADEON_SYS_HOTKEY            (1 << 10)
+#       define RADEON_DRV_LOADED            (1 << 12)
+#define RADEON_BIOS_ROM                     0x0f30 /* PCI */
+#define RADEON_BIST                         0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0                  0x1480
+#define RADEON_BRUSH_DATA1                  0x1484
+#define RADEON_BRUSH_DATA10                 0x14a8
+#define RADEON_BRUSH_DATA11                 0x14ac
+#define RADEON_BRUSH_DATA12                 0x14b0
+#define RADEON_BRUSH_DATA13                 0x14b4
+#define RADEON_BRUSH_DATA14                 0x14b8
+#define RADEON_BRUSH_DATA15                 0x14bc
+#define RADEON_BRUSH_DATA16                 0x14c0
+#define RADEON_BRUSH_DATA17                 0x14c4
+#define RADEON_BRUSH_DATA18                 0x14c8
+#define RADEON_BRUSH_DATA19                 0x14cc
+#define RADEON_BRUSH_DATA2                  0x1488
+#define RADEON_BRUSH_DATA20                 0x14d0
+#define RADEON_BRUSH_DATA21                 0x14d4
+#define RADEON_BRUSH_DATA22                 0x14d8
+#define RADEON_BRUSH_DATA23                 0x14dc
+#define RADEON_BRUSH_DATA24                 0x14e0
+#define RADEON_BRUSH_DATA25                 0x14e4
+#define RADEON_BRUSH_DATA26                 0x14e8
+#define RADEON_BRUSH_DATA27                 0x14ec
+#define RADEON_BRUSH_DATA28                 0x14f0
+#define RADEON_BRUSH_DATA29                 0x14f4
+#define RADEON_BRUSH_DATA3                  0x148c
+#define RADEON_BRUSH_DATA30                 0x14f8
+#define RADEON_BRUSH_DATA31                 0x14fc
+#define RADEON_BRUSH_DATA32                 0x1500
+#define RADEON_BRUSH_DATA33                 0x1504
+#define RADEON_BRUSH_DATA34                 0x1508
+#define RADEON_BRUSH_DATA35                 0x150c
+#define RADEON_BRUSH_DATA36                 0x1510
+#define RADEON_BRUSH_DATA37                 0x1514
+#define RADEON_BRUSH_DATA38                 0x1518
+#define RADEON_BRUSH_DATA39                 0x151c
+#define RADEON_BRUSH_DATA4                  0x1490
+#define RADEON_BRUSH_DATA40                 0x1520
+#define RADEON_BRUSH_DATA41                 0x1524
+#define RADEON_BRUSH_DATA42                 0x1528
+#define RADEON_BRUSH_DATA43                 0x152c
+#define RADEON_BRUSH_DATA44                 0x1530
+#define RADEON_BRUSH_DATA45                 0x1534
+#define RADEON_BRUSH_DATA46                 0x1538
+#define RADEON_BRUSH_DATA47                 0x153c
+#define RADEON_BRUSH_DATA48                 0x1540
+#define RADEON_BRUSH_DATA49                 0x1544
+#define RADEON_BRUSH_DATA5                  0x1494
+#define RADEON_BRUSH_DATA50                 0x1548
+#define RADEON_BRUSH_DATA51                 0x154c
+#define RADEON_BRUSH_DATA52                 0x1550
+#define RADEON_BRUSH_DATA53                 0x1554
+#define RADEON_BRUSH_DATA54                 0x1558
+#define RADEON_BRUSH_DATA55                 0x155c
+#define RADEON_BRUSH_DATA56                 0x1560
+#define RADEON_BRUSH_DATA57                 0x1564
+#define RADEON_BRUSH_DATA58                 0x1568
+#define RADEON_BRUSH_DATA59                 0x156c
+#define RADEON_BRUSH_DATA6                  0x1498
+#define RADEON_BRUSH_DATA60                 0x1570
+#define RADEON_BRUSH_DATA61                 0x1574
+#define RADEON_BRUSH_DATA62                 0x1578
+#define RADEON_BRUSH_DATA63                 0x157c
+#define RADEON_BRUSH_DATA7                  0x149c
+#define RADEON_BRUSH_DATA8                  0x14a0
+#define RADEON_BRUSH_DATA9                  0x14a4
+#define RADEON_BRUSH_SCALE                  0x1470
+#define RADEON_BRUSH_Y_X                    0x1474
+#define RADEON_BUS_CNTL                     0x0030
+#       define RADEON_BUS_MASTER_DIS         (1 << 6)
+#       define RADEON_BUS_BIOS_DIS_ROM       (1 << 12)
+#	define RS600_BUS_MASTER_DIS	     (1 << 14)
+#	define RS600_MSI_REARM		     (1 << 20) /* rs600/rs690/rs740 */
+#       define RADEON_BUS_RD_DISCARD_EN      (1 << 24)
+#       define RADEON_BUS_RD_ABORT_EN        (1 << 25)
+#       define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+#       define RADEON_BUS_WRT_BURST          (1 << 29)
+#       define RADEON_BUS_READ_BURST         (1 << 30)
+#define RADEON_BUS_CNTL1                    0x0034
+#       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN		    0x0160
+#	define RV370_MSI_REARM_EN	     (1 << 0)
+
+/* #define RADEON_PCIE_INDEX                   0x0030 */
+/* #define RADEON_PCIE_DATA                    0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL             0xa2 /* PCIE */
+#       define RADEON_PCIE_LC_LINK_WIDTH_SHIFT     0
+#       define RADEON_PCIE_LC_LINK_WIDTH_MASK      0x7
+#       define RADEON_PCIE_LC_LINK_WIDTH_X0        0
+#       define RADEON_PCIE_LC_LINK_WIDTH_X1        1
+#       define RADEON_PCIE_LC_LINK_WIDTH_X2        2
+#       define RADEON_PCIE_LC_LINK_WIDTH_X4        3
+#       define RADEON_PCIE_LC_LINK_WIDTH_X8        4
+#       define RADEON_PCIE_LC_LINK_WIDTH_X12       5
+#       define RADEON_PCIE_LC_LINK_WIDTH_X16       6
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT  4
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK   0x70
+#       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
+#       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
+#       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+#       define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE   (1 << 7)
+#       define R600_PCIE_LC_RENEGOTIATION_SUPPORT  (1 << 9)
+#       define R600_PCIE_LC_RENEGOTIATE_EN         (1 << 10)
+#       define R600_PCIE_LC_SHORT_RECONFIG_EN      (1 << 11)
+#       define R600_PCIE_LC_UPCONFIGURE_SUPPORT    (1 << 12)
+#       define R600_PCIE_LC_UPCONFIGURE_DIS        (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX      0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX      0x66c
+
+#define RADEON_CACHE_CNTL                   0x1724
+#define RADEON_CACHE_LINE                   0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID              0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR             0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL                 0x0001 /* PLL */
+#       define RADEON_DONT_USE_XTALIN       (1 << 4)
+#       define RADEON_SCLK_DYN_START_CNTL   (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA              0x000c
+#define RADEON_CLOCK_CNTL_INDEX             0x0008
+#       define RADEON_PLL_WR_EN             (1 << 7)
+#       define RADEON_PLL_DIV_SEL           (3 << 8)
+#       define RADEON_PLL2_DIV_SEL_MASK     (~(3 << 8))
+#define RADEON_CLK_PWRMGT_CNTL              0x0014
+#       define RADEON_ENGIN_DYNCLK_MODE     (1 << 12)
+#       define RADEON_ACTIVE_HILO_LAT_MASK  (3 << 13)
+#       define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+#       define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+#       define RADEON_MC_BUSY               (1 << 16)
+#       define RADEON_DLL_READY             (1 << 19)
+#       define RADEON_CG_NO1_DEBUG_0        (1 << 24)
+#       define RADEON_CG_NO1_DEBUG_MASK     (0x1f << 24)
+#       define RADEON_DYN_STOP_MODE_MASK    (7 << 21)
+#       define RADEON_TVPLL_PWRMGT_OFF      (1 << 30)
+#       define RADEON_TVCLK_TURNOFF         (1 << 31)
+#define RADEON_PLL_PWRMGT_CNTL              0x0015 /* PLL */
+#	define RADEON_PM_MODE_SEL           (1 << 13)
+#       define RADEON_TCL_BYPASS_DISABLE    (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D               0x1a24
+#define RADEON_CLR_CMP_CLR_DST              0x15c8
+#define RADEON_CLR_CMP_CLR_SRC              0x15c4
+#define RADEON_CLR_CMP_CNTL                 0x15c0
+#       define RADEON_SRC_CMP_EQ_COLOR      (4 <<  0)
+#       define RADEON_SRC_CMP_NEQ_COLOR     (5 <<  0)
+#       define RADEON_CLR_CMP_SRC_SOURCE    (1 << 24)
+#define RADEON_CLR_CMP_MASK                 0x15cc
+#       define RADEON_CLR_CMP_MSK           0xffffffff
+#define RADEON_CLR_CMP_MASK_3D              0x1A28
+#define RADEON_COMMAND                      0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID          0x1a0c
+#define RADEON_CONFIG_APER_0_BASE           0x0100
+#define RADEON_CONFIG_APER_1_BASE           0x0104
+#define RADEON_CONFIG_APER_SIZE             0x0108
+#define RADEON_CONFIG_BONDS                 0x00e8
+#define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
+#       define RADEON_CFG_ATI_REV_A11       (0   << 16)
+#       define RADEON_CFG_ATI_REV_A12       (1   << 16)
+#       define RADEON_CFG_ATI_REV_A13       (2   << 16)
+#       define RADEON_CFG_ATI_REV_ID_MASK   (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE               0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED      0x0114
+#define RADEON_CONFIG_REG_1_BASE            0x010c
+#define RADEON_CONFIG_REG_APER_SIZE         0x0110
+#define RADEON_CONFIG_XSTRAP                0x00e4
+#define RADEON_CONSTANT_COLOR_C             0x1d34
+#       define RADEON_CONSTANT_COLOR_MASK   0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ONE    0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ZERO   0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR             0x0740
+#define RADEON_CRC_CMDFIFO_DOUT             0x0744
+#define RADEON_GRPH_BUFFER_CNTL             0x02f0
+#       define RADEON_GRPH_START_REQ_MASK          (0x7f)
+#       define RADEON_GRPH_START_REQ_SHIFT         0
+#       define RADEON_GRPH_STOP_REQ_MASK           (0x7f<<8)
+#       define RADEON_GRPH_STOP_REQ_SHIFT          8
+#       define RADEON_GRPH_CRITICAL_POINT_MASK     (0x7f<<16)
+#       define RADEON_GRPH_CRITICAL_POINT_SHIFT    16
+#       define RADEON_GRPH_CRITICAL_CNTL           (1<<28)
+#       define RADEON_GRPH_BUFFER_SIZE             (1<<29)
+#       define RADEON_GRPH_CRITICAL_AT_SOF         (1<<30)
+#       define RADEON_GRPH_STOP_CNTL               (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL            0x03f0
+#       define RADEON_GRPH2_START_REQ_MASK         (0x7f)
+#       define RADEON_GRPH2_START_REQ_SHIFT         0
+#       define RADEON_GRPH2_STOP_REQ_MASK          (0x7f<<8)
+#       define RADEON_GRPH2_STOP_REQ_SHIFT         8
+#       define RADEON_GRPH2_CRITICAL_POINT_MASK    (0x7f<<16)
+#       define RADEON_GRPH2_CRITICAL_POINT_SHIFT   16
+#       define RADEON_GRPH2_CRITICAL_CNTL          (1<<28)
+#       define RADEON_GRPH2_BUFFER_SIZE            (1<<29)
+#       define RADEON_GRPH2_CRITICAL_AT_SOF        (1<<30)
+#       define RADEON_GRPH2_STOP_CNTL              (1<<31)
+#define RADEON_CRTC_CRNT_FRAME              0x0214
+#define RADEON_CRTC_EXT_CNTL                0x0054
+#       define RADEON_CRTC_VGA_XOVERSCAN    (1 <<  0)
+#       define RADEON_VGA_ATI_LINEAR        (1 <<  3)
+#       define RADEON_XCRT_CNT_EN           (1 <<  6)
+#       define RADEON_CRTC_HSYNC_DIS        (1 <<  8)
+#       define RADEON_CRTC_VSYNC_DIS        (1 <<  9)
+#       define RADEON_CRTC_DISPLAY_DIS      (1 << 10)
+#       define RADEON_CRTC_SYNC_TRISTAT     (1 << 11)
+#       define RADEON_CRTC_CRT_ON           (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE      0x0055
+#       define RADEON_CRTC_HSYNC_DIS_BYTE   (1 <<  0)
+#       define RADEON_CRTC_VSYNC_DIS_BYTE   (1 <<  1)
+#       define RADEON_CRTC_DISPLAY_DIS_BYTE (1 <<  2)
+#define RADEON_CRTC_GEN_CNTL                0x0050
+#       define RADEON_CRTC_DBL_SCAN_EN      (1 <<  0)
+#       define RADEON_CRTC_INTERLACE_EN     (1 <<  1)
+#       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
+#       define RADEON_CRTC_ICON_EN          (1 << 15)
+#       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_VSTAT_MODE_MASK  (3 << 17)
+#       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
+#       define RADEON_CRTC_CUR_MODE_SHIFT   20
+#       define RADEON_CRTC_CUR_MODE_MONO    0
+#       define RADEON_CRTC_CUR_MODE_24BPP   2
+#       define RADEON_CRTC_EXT_DISP_EN      (1 << 24)
+#       define RADEON_CRTC_EN               (1 << 25)
+#       define RADEON_CRTC_DISP_REQ_EN_B    (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL               0x03f8
+#       define RADEON_CRTC2_DBL_SCAN_EN     (1 <<  0)
+#       define RADEON_CRTC2_INTERLACE_EN    (1 <<  1)
+#       define RADEON_CRTC2_SYNC_TRISTAT    (1 <<  4)
+#       define RADEON_CRTC2_HSYNC_TRISTAT   (1 <<  5)
+#       define RADEON_CRTC2_VSYNC_TRISTAT   (1 <<  6)
+#       define RADEON_CRTC2_CRT2_ON         (1 <<  7)
+#       define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+#       define RADEON_CRTC2_PIX_WIDTH_MASK  (0xf << 8)
+#       define RADEON_CRTC2_ICON_EN         (1 << 15)
+#       define RADEON_CRTC2_CUR_EN          (1 << 16)
+#       define RADEON_CRTC2_CUR_MODE_MASK   (7 << 20)
+#       define RADEON_CRTC2_DISP_DIS        (1 << 23)
+#       define RADEON_CRTC2_EN              (1 << 25)
+#       define RADEON_CRTC2_DISP_REQ_EN_B   (1 << 26)
+#       define RADEON_CRTC2_CSYNC_EN        (1 << 27)
+#       define RADEON_CRTC2_HSYNC_DIS       (1 << 28)
+#       define RADEON_CRTC2_VSYNC_DIS       (1 << 29)
+#define RADEON_CRTC_MORE_CNTL               0x27c
+#       define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+#       define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+#       define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+#       define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE          0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID         0x0204
+#       define RADEON_CRTC_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID        0x0304
+#       define RADEON_CRTC2_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC2_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC2_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC2_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC_H_TOTAL_DISP            0x0200
+#       define RADEON_CRTC_H_TOTAL          (0x03ff << 0)
+#       define RADEON_CRTC_H_TOTAL_SHIFT    0
+#       define RADEON_CRTC_H_DISP           (0x01ff << 16)
+#       define RADEON_CRTC_H_DISP_SHIFT     16
+#define RADEON_CRTC2_H_TOTAL_DISP           0x0300
+#       define RADEON_CRTC2_H_TOTAL         (0x03ff << 0)
+#       define RADEON_CRTC2_H_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_H_DISP          (0x01ff << 16)
+#       define RADEON_CRTC2_H_DISP_SHIFT    16
+
+#define RADEON_CRTC_OFFSET_RIGHT	    0x0220
+#define RADEON_CRTC_OFFSET                  0x0224
+#	define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC_OFFSET__OFFSET_LOCK	   (1<<31)
+
+#define RADEON_CRTC2_OFFSET                 0x0324
+#	define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC2_OFFSET__OFFSET_LOCK	    (1<<31)
+#define RADEON_CRTC_OFFSET_CNTL             0x0228
+#       define RADEON_CRTC_TILE_LINE_SHIFT              0
+#       define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT        4
+#	define R300_CRTC_X_Y_MODE_EN_RIGHT		(1 << 6)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK   (3 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO   (0 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS    (3 << 7)
+#	define R300_CRTC_X_Y_MODE_EN			(1 << 9)
+#	define R300_CRTC_MICRO_TILE_BUFFER_MASK		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_AUTO		(0 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_SINGLE	(1 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE	(2 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DIS		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_EN_RIGHT		(1 << 12)
+#	define R300_CRTC_MICRO_TILE_EN			(1 << 13)
+#	define R300_CRTC_MACRO_TILE_EN_RIGHT		(1 << 14)
+#       define R300_CRTC_MACRO_TILE_EN                  (1 << 15)
+#       define RADEON_CRTC_TILE_EN_RIGHT                (1 << 14)
+#       define RADEON_CRTC_TILE_EN                      (1 << 15)
+#       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
+#       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN      (1 << 28)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN     (1 << 29)
+
+#define R300_CRTC_TILE_X0_Y0	            0x0350
+#define R300_CRTC2_TILE_X0_Y0	            0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL            0x0328
+#       define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+#       define RADEON_CRTC2_TILE_EN         (1 << 15)
+#define RADEON_CRTC_PITCH                   0x022c
+#	define RADEON_CRTC_PITCH__SHIFT		 0
+#	define RADEON_CRTC_PITCH__RIGHT_SHIFT	16
+
+#define RADEON_CRTC2_PITCH                  0x032c
+#define RADEON_CRTC_STATUS                  0x005c
+#       define RADEON_CRTC_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC2_STATUS                  0x03fc
+#       define RADEON_CRTC2_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC2_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC2_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC_V_SYNC_STRT_WID         0x020c
+#       define RADEON_CRTC_V_SYNC_STRT        (0x7ff <<  0)
+#       define RADEON_CRTC_V_SYNC_STRT_SHIFT  0
+#       define RADEON_CRTC_V_SYNC_WID         (0x1f  << 16)
+#       define RADEON_CRTC_V_SYNC_WID_SHIFT   16
+#       define RADEON_CRTC_V_SYNC_POL         (1     << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID        0x030c
+#       define RADEON_CRTC2_V_SYNC_STRT       (0x7ff <<  0)
+#       define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+#       define RADEON_CRTC2_V_SYNC_WID        (0x1f  << 16)
+#       define RADEON_CRTC2_V_SYNC_WID_SHIFT  16
+#       define RADEON_CRTC2_V_SYNC_POL        (1     << 23)
+#define RADEON_CRTC_V_TOTAL_DISP            0x0208
+#       define RADEON_CRTC_V_TOTAL          (0x07ff << 0)
+#       define RADEON_CRTC_V_TOTAL_SHIFT    0
+#       define RADEON_CRTC_V_DISP           (0x07ff << 16)
+#       define RADEON_CRTC_V_DISP_SHIFT     16
+#define RADEON_CRTC2_V_TOTAL_DISP           0x0308
+#       define RADEON_CRTC2_V_TOTAL         (0x07ff << 0)
+#       define RADEON_CRTC2_V_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_V_DISP          (0x07ff << 16)
+#       define RADEON_CRTC2_V_DISP_SHIFT    16
+#define RADEON_CRTC_VLINE_CRNT_VLINE        0x0210
+#       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME             0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
+#define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
+#define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0                     0x026c
+#define RADEON_CUR_CLR1                     0x0270
+#define RADEON_CUR_HORZ_VERT_OFF            0x0268
+#define RADEON_CUR_HORZ_VERT_POSN           0x0264
+#define RADEON_CUR_OFFSET                   0x0260
+#       define RADEON_CUR_LOCK              (1 << 31)
+#define RADEON_CUR2_CLR0                    0x036c
+#define RADEON_CUR2_CLR1                    0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF           0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN          0x0364
+#define RADEON_CUR2_OFFSET                  0x0360
+#       define RADEON_CUR2_LOCK             (1 << 31)
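+
+/*
+ * Editor's note: illustrative sketch, not part of the original list.
+ * Cursor moves pack X into the high half and Y into the low half of
+ * the position register; keeping RADEON_CUR_LOCK set across the
+ * intermediate writes avoids a torn update, and the final CUR_OFFSET
+ * write (lock bit clear) lets the new state latch. Assumes readl and
+ * writel from <linux/io.h> and a mapped register aperture.
+ */
+static inline void radeon_cursor_move_sketch(void __iomem *mmio,
+					     u32 x, u32 y, u32 gpu_addr)
+{
+	writel(RADEON_CUR_LOCK, mmio + RADEON_CUR_HORZ_VERT_OFF); /* no hot-spot */
+	writel(RADEON_CUR_LOCK | (x << 16) | y,
+	       mmio + RADEON_CUR_HORZ_VERT_POSN);
+	writel(gpu_addr, mmio + RADEON_CUR_OFFSET);	/* unlocks */
+}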
+
+#define RADEON_DAC_CNTL                     0x0058
+#       define RADEON_DAC_RANGE_CNTL        (3 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_PS2    (2 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_MASK   0x03
+#       define RADEON_DAC_BLANKING          (1 <<  2)
+#       define RADEON_DAC_CMP_EN            (1 <<  3)
+#       define RADEON_DAC_CMP_OUTPUT        (1 <<  7)
+#       define RADEON_DAC_8BIT_EN           (1 <<  8)
+#       define RADEON_DAC_TVO_EN            (1 << 10)
+#       define RADEON_DAC_VGA_ADR_EN        (1 << 13)
+#       define RADEON_DAC_PDWN              (1 << 15)
+#       define RADEON_DAC_MASK_ALL          (0xff << 24)
+#define RADEON_DAC_CNTL2                    0x007c
+#       define RADEON_DAC2_TV_CLK_SEL       (0 <<  1)
+#       define RADEON_DAC2_DAC_CLK_SEL      (1 <<  0)
+#       define RADEON_DAC2_DAC2_CLK_SEL     (1 <<  1)
+#       define RADEON_DAC2_PALETTE_ACC_CTL  (1 <<  5)
+#       define RADEON_DAC2_CMP_EN           (1 <<  7)
+#       define RADEON_DAC2_CMP_OUT_R        (1 <<  8)
+#       define RADEON_DAC2_CMP_OUT_G        (1 <<  9)
+#       define RADEON_DAC2_CMP_OUT_B        (1 << 10)
+#       define RADEON_DAC2_CMP_OUTPUT       (1 << 11)
+#define RADEON_DAC_EXT_CNTL                 0x0280
+#       define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+#       define RADEON_DAC2_FORCE_DATA_EN      (1 << 1)
+#       define RADEON_DAC_FORCE_BLANK_OFF_EN  (1 << 4)
+#       define RADEON_DAC_FORCE_DATA_EN       (1 << 5)
+#       define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_R    (0 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_G    (1 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_B    (2 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_RGB  (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_MASK   0x0003ff00
+#       define RADEON_DAC_FORCE_DATA_SHIFT  8
+#define RADEON_DAC_MACRO_CNTL               0x0d04
+#       define RADEON_DAC_PDWN_R            (1 << 16)
+#       define RADEON_DAC_PDWN_G            (1 << 17)
+#       define RADEON_DAC_PDWN_B            (1 << 18)
+#define RADEON_DISP_PWR_MAN                 0x0d08
+#       define RADEON_DISP_PWR_MAN_D3_CRTC_EN      (1 << 0)
+#       define RADEON_DISP_PWR_MAN_D3_CRTC2_EN     (1 << 4)
+#       define RADEON_DISP_PWR_MAN_DPMS_ON  (0 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_STANDBY    (1 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_SUSPEND    (2 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+#       define RADEON_DISP_D3_RST           (1 << 16)
+#       define RADEON_DISP_D3_REG_RST       (1 << 17)
+#       define RADEON_DISP_D3_GRPH_RST      (1 << 18)
+#       define RADEON_DISP_D3_SUBPIC_RST    (1 << 19)
+#       define RADEON_DISP_D3_OV0_RST       (1 << 20)
+#       define RADEON_DISP_D1D2_GRPH_RST    (1 << 21)
+#       define RADEON_DISP_D1D2_SUBPIC_RST  (1 << 22)
+#       define RADEON_DISP_D1D2_OV0_RST     (1 << 23)
+#       define RADEON_DIG_TMDS_ENABLE_RST   (1 << 24)
+#       define RADEON_TV_ENABLE_RST         (1 << 25)
+#       define RADEON_AUTO_PWRUP_EN         (1 << 26)
+#define RADEON_TV_DAC_CNTL                  0x088c
+#       define RADEON_TV_DAC_NBLANK         (1 << 0)
+#       define RADEON_TV_DAC_NHOLD          (1 << 1)
+#       define RADEON_TV_DAC_PEDESTAL       (1 <<  2)
+#       define RADEON_TV_MONITOR_DETECT_EN  (1 <<  4)
+#       define RADEON_TV_DAC_CMPOUT         (1 <<  5)
+#       define RADEON_TV_DAC_STD_MASK       (3 <<  8)
+#       define RADEON_TV_DAC_STD_PAL        (0 <<  8)
+#       define RADEON_TV_DAC_STD_NTSC       (1 <<  8)
+#       define RADEON_TV_DAC_STD_PS2        (2 <<  8)
+#       define RADEON_TV_DAC_STD_RS343      (3 <<  8)
+#       define RADEON_TV_DAC_BGSLEEP        (1 <<  6)
+#       define RADEON_TV_DAC_BGADJ_MASK     (0xf <<  16)
+#       define RADEON_TV_DAC_BGADJ_SHIFT    16
+#       define RADEON_TV_DAC_DACADJ_MASK    (0xf <<  20)
+#       define RADEON_TV_DAC_DACADJ_SHIFT   20
+#       define RADEON_TV_DAC_RDACPD         (1 <<  24)
+#       define RADEON_TV_DAC_GDACPD         (1 <<  25)
+#       define RADEON_TV_DAC_BDACPD         (1 <<  26)
+#       define RADEON_TV_DAC_RDACDET        (1 << 29)
+#       define RADEON_TV_DAC_GDACDET        (1 << 30)
+#       define RADEON_TV_DAC_BDACDET        (1 << 31)
+#       define R420_TV_DAC_DACADJ_MASK      (0x1f <<  20)
+#       define R420_TV_DAC_RDACPD           (1 <<  25)
+#       define R420_TV_DAC_GDACPD           (1 <<  26)
+#       define R420_TV_DAC_BDACPD           (1 <<  27)
+#       define R420_TV_DAC_TVENABLE         (1 <<  28)
+#define RADEON_DISP_HW_DEBUG                0x0d14
+#       define RADEON_CRT2_DISP1_SEL        (1 <<  5)
+#define RADEON_DISP_OUTPUT_CNTL             0x0d64
+#       define RADEON_DISP_DAC_SOURCE_MASK  0x03
+#       define RADEON_DISP_DAC2_SOURCE_MASK  0x0c
+#       define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+#       define RADEON_DISP_DAC_SOURCE_RMX   0x02
+#       define RADEON_DISP_DAC_SOURCE_LTU   0x03
+#       define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+#       define RADEON_DISP_TVDAC_SOURCE_MASK  (0x03 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC  0x0
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_RMX   (0x02 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_LTU   (0x03 << 2)
+#       define RADEON_DISP_TRANS_MATRIX_MASK  (0x03 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_GRAPHICS  (0x01 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_VIDEO     (0x02 << 4)
+#       define RADEON_DISP_TV_SOURCE_CRTC   (1 << 16) /* crtc1 or crtc2 */
+#       define RADEON_DISP_TV_SOURCE_LTU    (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL             0x0d6c
+#       define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+#       define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG                  0x02cc
+#define RADEON_DAC_DATA                     0x03c9 /* VGA */
+#define RADEON_DAC_MASK                     0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX                  0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX                  0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG                   0x02e0
+#define RADEON_DDA_ON_OFF                   0x02e4
+#define RADEON_DEFAULT_OFFSET               0x16e0
+#define RADEON_DEFAULT_PITCH                0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT      0x16e8
+#       define RADEON_DEFAULT_SC_RIGHT_MAX  (0x1fff <<  0)
+#       define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL   0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK   0x1824
+#define RADEON_DEVICE_ID                    0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL               0x0d00
+#       define RADEON_SOFT_RESET_GRPH_PP    (1 << 0)
+#define RADEON_DISP_MERGE_CNTL		  0x0d60
+#       define RADEON_DISP_ALPHA_MODE_MASK  0x03
+#       define RADEON_DISP_ALPHA_MODE_KEY   0
+#       define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+#       define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+#       define RADEON_DISP_RGB_OFFSET_EN    (1 << 8)
+#       define RADEON_DISP_GRPH_ALPHA_MASK  (0xff << 16)
+#       define RADEON_DISP_OV0_ALPHA_MASK   (0xff << 24)
+#	define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL		    0x0d68
+#       define RADEON_DISP2_RGB_OFFSET_EN   (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A        0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B        0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C        0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D        0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E        0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F        0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR            0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR            0x147c
+#define RADEON_DP_CNTL                      0x16c0
+#       define RADEON_DST_X_LEFT_TO_RIGHT   (1 <<  0)
+#       define RADEON_DST_Y_TOP_TO_BOTTOM   (1 <<  1)
+#       define RADEON_DP_DST_TILE_LINEAR    (0 <<  3)
+#       define RADEON_DP_DST_TILE_MACRO     (1 <<  3)
+#       define RADEON_DP_DST_TILE_MICRO     (2 <<  3)
+#       define RADEON_DP_DST_TILE_BOTH      (3 <<  3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR     0x16d0
+#       define RADEON_DST_Y_MAJOR             (1 <<  2)
+#       define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+#       define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE                  0x16c4
+#       define RADEON_HOST_BIG_ENDIAN_EN    (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL           0x146c
+#       define RADEON_GMC_SRC_PITCH_OFFSET_CNTL   (1    <<  0)
+#       define RADEON_GMC_DST_PITCH_OFFSET_CNTL   (1    <<  1)
+#       define RADEON_GMC_SRC_CLIPPING            (1    <<  2)
+#       define RADEON_GMC_DST_CLIPPING            (1    <<  3)
+#       define RADEON_GMC_BRUSH_DATATYPE_MASK     (0x0f <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_BG    (0    <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_LA    (1    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_BG    (4    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_LA    (5    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_BG   (6    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_LA   (7    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_BG  (8    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_LA  (9    <<  4)
+#       define RADEON_GMC_BRUSH_8x8_COLOR         (10   <<  4)
+#       define RADEON_GMC_BRUSH_1X8_COLOR         (12   <<  4)
+#       define RADEON_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
+#       define RADEON_GMC_BRUSH_NONE              (15   <<  4)
+#       define RADEON_GMC_DST_8BPP_CI             (2    <<  8)
+#       define RADEON_GMC_DST_15BPP               (3    <<  8)
+#       define RADEON_GMC_DST_16BPP               (4    <<  8)
+#       define RADEON_GMC_DST_24BPP               (5    <<  8)
+#       define RADEON_GMC_DST_32BPP               (6    <<  8)
+#       define RADEON_GMC_DST_8BPP_RGB            (7    <<  8)
+#       define RADEON_GMC_DST_Y8                  (8    <<  8)
+#       define RADEON_GMC_DST_RGB8                (9    <<  8)
+#       define RADEON_GMC_DST_VYUY                (11   <<  8)
+#       define RADEON_GMC_DST_YVYU                (12   <<  8)
+#       define RADEON_GMC_DST_AYUV444             (14   <<  8)
+#       define RADEON_GMC_DST_ARGB4444            (15   <<  8)
+#       define RADEON_GMC_DST_DATATYPE_MASK       (0x0f <<  8)
+#       define RADEON_GMC_DST_DATATYPE_SHIFT      8
+#       define RADEON_GMC_SRC_DATATYPE_MASK       (3    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_COLOR      (3    << 12)
+#       define RADEON_GMC_BYTE_PIX_ORDER          (1    << 14)
+#       define RADEON_GMC_BYTE_MSB_TO_LSB         (0    << 14)
+#       define RADEON_GMC_BYTE_LSB_TO_MSB         (1    << 14)
+#       define RADEON_GMC_CONVERSION_TEMP         (1    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_6500    (0    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_9300    (1    << 15)
+#       define RADEON_GMC_ROP3_MASK               (0xff << 16)
+#       define RADEON_DP_SRC_SOURCE_MASK          (7    << 24)
+#       define RADEON_DP_SRC_SOURCE_MEMORY        (2    << 24)
+#       define RADEON_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
+#       define RADEON_GMC_3D_FCN_EN               (1    << 27)
+#       define RADEON_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
+#       define RADEON_GMC_AUX_CLIP_DIS            (1    << 29)
+#       define RADEON_GMC_WR_MSK_DIS              (1    << 30)
+#       define RADEON_GMC_LD_BRUSH_Y_X            (1    << 31)
+#       define RADEON_ROP3_ZERO             0x00000000
+#       define RADEON_ROP3_DSa              0x00880000
+#       define RADEON_ROP3_SDna             0x00440000
+#       define RADEON_ROP3_S                0x00cc0000
+#       define RADEON_ROP3_DSna             0x00220000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DSx              0x00660000
+#       define RADEON_ROP3_DSo              0x00ee0000
+#       define RADEON_ROP3_DSon             0x00110000
+#       define RADEON_ROP3_DSxn             0x00990000
+#       define RADEON_ROP3_Dn               0x00550000
+#       define RADEON_ROP3_SDno             0x00dd0000
+#       define RADEON_ROP3_Sn               0x00330000
+#       define RADEON_ROP3_DSno             0x00bb0000
+#       define RADEON_ROP3_DSan             0x00770000
+#       define RADEON_ROP3_ONE              0x00ff0000
+#       define RADEON_ROP3_DPa              0x00a00000
+#       define RADEON_ROP3_PDna             0x00500000
+#       define RADEON_ROP3_P                0x00f00000
+#       define RADEON_ROP3_DPna             0x000a0000
+#       define RADEON_ROP3_DPx              0x005a0000
+#       define RADEON_ROP3_DPo              0x00fa0000
+#       define RADEON_ROP3_DPon             0x00050000
+#       define RADEON_ROP3_PDxn             0x00a50000
+#       define RADEON_ROP3_PDno             0x00f50000
+#       define RADEON_ROP3_Pn               0x000f0000
+#       define RADEON_ROP3_DPno             0x00af0000
+#       define RADEON_ROP3_DPan             0x005f0000
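+
+/*
+ * Editor's note: illustrative sketch. The ROP3 values above are
+ * pre-shifted into the RADEON_GMC_ROP3_MASK field, so building a 2D
+ * state word is a plain OR of brush, datatype, source and ROP. A
+ * hypothetical solid-fill setup (RADEON_ROP3_P copies the brush);
+ * pass one of the pre-shifted RADEON_GMC_DST_* datatype values:
+ */
+static inline u32 radeon_gmc_solid_fill_sketch(u32 gmc_dst_datatype)
+{
+	return RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+	       RADEON_GMC_BRUSH_SOLID_COLOR |
+	       (gmc_dst_datatype & RADEON_GMC_DST_DATATYPE_MASK) |
+	       RADEON_GMC_SRC_DATATYPE_COLOR |
+	       RADEON_ROP3_P |
+	       RADEON_GMC_CLR_CMP_CNTL_DIS;
+}
+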
+#define RADEON_DP_GUI_MASTER_CNTL_C         0x1c84
+#define RADEON_DP_MIX                       0x16c8
+#define RADEON_DP_SRC_BKGD_CLR              0x15dc
+#define RADEON_DP_SRC_FRGD_CLR              0x15d8
+#define RADEON_DP_WRITE_MASK                0x16cc
+#define RADEON_DST_BRES_DEC                 0x1630
+#define RADEON_DST_BRES_ERR                 0x1628
+#define RADEON_DST_BRES_INC                 0x162c
+#define RADEON_DST_BRES_LNTH                0x1634
+#define RADEON_DST_BRES_LNTH_SUB            0x1638
+#define RADEON_DST_HEIGHT                   0x1410
+#define RADEON_DST_HEIGHT_WIDTH             0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8           0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW          0x15b4
+#define RADEON_DST_HEIGHT_Y                 0x15a0
+#define RADEON_DST_LINE_START               0x1600
+#define RADEON_DST_LINE_END                 0x1604
+#define RADEON_DST_LINE_PATCOUNT            0x1608
+#       define RADEON_BRES_CNTL_SHIFT       8
+#define RADEON_DST_OFFSET                   0x1404
+#define RADEON_DST_PITCH                    0x1408
+#define RADEON_DST_PITCH_OFFSET             0x142c
+#define RADEON_DST_PITCH_OFFSET_C           0x1c80
+#       define RADEON_PITCH_SHIFT           21
+#       define RADEON_DST_TILE_LINEAR       (0 << 30)
+#       define RADEON_DST_TILE_MACRO        (1 << 30)
+#       define RADEON_DST_TILE_MICRO        (2 << 30)
+#       define RADEON_DST_TILE_BOTH         (3 << 30)
+#define RADEON_DST_WIDTH                    0x140c
+#define RADEON_DST_WIDTH_HEIGHT             0x1598
+#define RADEON_DST_WIDTH_X                  0x1588
+#define RADEON_DST_WIDTH_X_INCY             0x159c
+#define RADEON_DST_X                        0x141c
+#define RADEON_DST_X_SUB                    0x15a4
+#define RADEON_DST_X_Y                      0x1594
+#define RADEON_DST_Y                        0x1420
+#define RADEON_DST_Y_SUB                    0x15a8
+#define RADEON_DST_Y_X                      0x1438
+
+#define RADEON_FCP_CNTL                     0x0910
+#      define RADEON_FCP0_SRC_PCICLK             0
+#      define RADEON_FCP0_SRC_PCLK               1
+#      define RADEON_FCP0_SRC_PCLKb              2
+#      define RADEON_FCP0_SRC_HREF               3
+#      define RADEON_FCP0_SRC_GND                4
+#      define RADEON_FCP0_SRC_HREFb              5
+#define RADEON_FLUSH_1                      0x1704
+#define RADEON_FLUSH_2                      0x1708
+#define RADEON_FLUSH_3                      0x170c
+#define RADEON_FLUSH_4                      0x1710
+#define RADEON_FLUSH_5                      0x1714
+#define RADEON_FLUSH_6                      0x1718
+#define RADEON_FLUSH_7                      0x171c
+#define RADEON_FOG_3D_TABLE_START           0x1810
+#define RADEON_FOG_3D_TABLE_END             0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY         0x181c
+#define RADEON_FOG_TABLE_INDEX              0x1a14
+#define RADEON_FOG_TABLE_DATA               0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP         0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP         0x0254
+#       define RADEON_FP_CRTC_H_TOTAL_MASK      0x000003ff
+#       define RADEON_FP_CRTC_H_DISP_MASK       0x01ff0000
+#       define RADEON_FP_CRTC_V_TOTAL_MASK      0x00000fff
+#       define RADEON_FP_CRTC_V_DISP_MASK       0x0fff0000
+#       define RADEON_FP_H_SYNC_STRT_CHAR_MASK  0x00001ff8
+#       define RADEON_FP_H_SYNC_WID_MASK        0x003f0000
+#       define RADEON_FP_V_SYNC_STRT_MASK       0x00000fff
+#       define RADEON_FP_V_SYNC_WID_MASK        0x001f0000
+#       define RADEON_FP_CRTC_H_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_H_DISP_SHIFT      0x00000010
+#       define RADEON_FP_CRTC_V_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_V_DISP_SHIFT      0x00000010
+#       define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+#       define RADEON_FP_H_SYNC_WID_SHIFT       0x00000010
+#       define RADEON_FP_V_SYNC_STRT_SHIFT      0x00000000
+#       define RADEON_FP_V_SYNC_WID_SHIFT       0x00000010
+#define RADEON_FP_GEN_CNTL                  0x0284
+#       define RADEON_FP_FPON                  (1 <<  0)
+#       define RADEON_FP_BLANK_EN              (1 <<  1)
+#       define RADEON_FP_TMDS_EN               (1 <<  2)
+#       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
+#       define RADEON_FP_EN_TMDS               (1 <<  7)
+#       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
+#       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
+#       define R200_FP_SOURCE_SEL_RMX          (2 <<  10)
+#       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
+#       define RADEON_FP_SEL_CRTC1             (0 << 13)
+#       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define R300_HPD_SEL(x)                 ((x) << 13)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+#       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+#       define RADEON_FP_CRTC_USE_SHADOW_VEND  (1 << 18)
+#       define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+#       define RADEON_FP_DFP_SYNC_SEL          (1 << 21)
+#       define RADEON_FP_CRTC_LOCK_8DOT        (1 << 22)
+#       define RADEON_FP_CRT_SYNC_SEL          (1 << 23)
+#       define RADEON_FP_USE_SHADOW_EN         (1 << 24)
+#       define RADEON_FP_CRT_SYNC_ALT          (1 << 26)
+#define RADEON_FP2_GEN_CNTL                 0x0288
+#       define RADEON_FP2_BLANK_EN             (1 <<  1)
+#       define RADEON_FP2_ON                   (1 <<  2)
+#       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
+#       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
+#       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define R200_FP2_SOURCE_SEL_RMX         (2 << 10)
+#       define R200_FP2_SOURCE_SEL_TRANS_UNIT  (3 << 10)
+#       define RADEON_FP2_SRC_SEL_MASK         (3 << 13)
+#       define RADEON_FP2_SRC_SEL_CRTC2        (1 << 13)
+#       define RADEON_FP2_FP_POL               (1 << 16)
+#       define RADEON_FP2_LP_POL               (1 << 17)
+#       define RADEON_FP2_SCK_POL              (1 << 18)
+#       define RADEON_FP2_LCD_CNTL_MASK        (7 << 19)
+#       define RADEON_FP2_PAD_FLOP_EN          (1 << 22)
+#       define RADEON_FP2_CRC_EN               (1 << 23)
+#       define RADEON_FP2_CRC_READ_EN          (1 << 24)
+#       define RADEON_FP2_DVO_EN               (1 << 25)
+#       define RADEON_FP2_DVO_RATE_SEL_SDR     (1 << 26)
+#       define R200_FP2_DVO_RATE_SEL_SDR       (1 << 27)
+#       define R300_FP2_DVO_CLOCK_MODE_SINGLE  (1 << 28)
+#       define R300_FP2_DVO_DUAL_CHANNEL_EN    (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID           0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID          0x03c4
+#define RADEON_FP_HORZ_STRETCH              0x028c
+#define RADEON_FP_HORZ2_STRETCH             0x038c
+#       define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+#       define RADEON_HORZ_STRETCH_RATIO_MAX  4096
+#       define RADEON_HORZ_PANEL_SIZE         (0x1ff   << 16)
+#       define RADEON_HORZ_PANEL_SHIFT        16
+#       define RADEON_HORZ_STRETCH_PIXREP     (0      << 25)
+#       define RADEON_HORZ_STRETCH_BLEND      (1      << 26)
+#       define RADEON_HORZ_STRETCH_ENABLE     (1      << 25)
+#       define RADEON_HORZ_AUTO_RATIO         (1      << 27)
+#       define RADEON_HORZ_FP_LOOP_STRETCH    (0x7    << 28)
+#       define RADEON_HORZ_AUTO_RATIO_INC     (1      << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE          0x0278
+#define RADEON_FP_V_SYNC_STRT_WID           0x02c8
+#define RADEON_FP_VERT_STRETCH              0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID          0x03c8
+#define RADEON_FP_VERT2_STRETCH             0x0390
+#       define RADEON_VERT_PANEL_SIZE          (0xfff << 12)
+#       define RADEON_VERT_PANEL_SHIFT         12
+#       define RADEON_VERT_STRETCH_RATIO_MASK  0xfff
+#       define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+#       define RADEON_VERT_STRETCH_RATIO_MAX   4096
+#       define RADEON_VERT_STRETCH_ENABLE      (1     << 25)
+#       define RADEON_VERT_STRETCH_LINEREP     (0     << 26)
+#       define RADEON_VERT_STRETCH_BLEND       (1     << 26)
+#       define RADEON_VERT_AUTO_RATIO_EN       (1     << 27)
+#	define RADEON_VERT_AUTO_RATIO_INC      (1     << 31)
+#       define RADEON_VERT_STRETCH_RESERVED    0x71000000
+#define RS400_FP_2ND_GEN_CNTL               0x0384
+#       define RS400_FP_2ND_ON              (1 << 0)
+#       define RS400_FP_2ND_BLANK_EN        (1 << 1)
+#       define RS400_TMDS_2ND_EN            (1 << 2)
+#       define RS400_PANEL_FORMAT_2ND       (1 << 3)
+#       define RS400_FP_2ND_EN_TMDS         (1 << 7)
+#       define RS400_FP_2ND_DETECT_SENSE    (1 << 8)
+#       define RS400_FP_2ND_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP_2ND_DETECT_EN       (1 << 12)
+#       define RS400_HPD_2ND_SEL            (1 << 13)
+#define RS400_FP2_2_GEN_CNTL                0x0388
+#       define RS400_FP2_2_BLANK_EN         (1 << 1)
+#       define RS400_FP2_2_ON               (1 << 2)
+#       define RS400_FP2_2_PANEL_FORMAT     (1 << 3)
+#       define RS400_FP2_2_DETECT_SENSE     (1 << 8)
+#       define RS400_FP2_2_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP2_2_DVO2_EN          (1 << 25)
+#define RS400_TMDS2_CNTL                    0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL        0x03a4
+#       define RS400_TMDS2_PLLEN            (1 << 0)
+#       define RS400_TMDS2_PLLRST           (1 << 1)
+
+#define RADEON_GEN_INT_CNTL                 0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_FP_DETECT_MASK		(1 << 4)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_FP2_DETECT_MASK		(1 << 10)
+#	define RADEON_GUI_IDLE_MASK		(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+#define RADEON_GEN_INT_STATUS               0x0044
+#	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_FP_DETECT_STAT		(1 << 4)
+#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_FP2_DETECT_STAT		(1 << 10)
+#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
+#	define RADEON_GUI_IDLE_STAT		(1 << 19)
+#	define RADEON_GUI_IDLE_STAT_ACK		(1 << 19)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
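+
+/*
+ * Editor's note: illustrative sketch. Each *_STAT bit has a *_STAT_ACK
+ * alias at the same position because the register is write-one-to-
+ * clear: a minimal handler reads the status and writes the handled
+ * bits straight back. Locking and dispatch omitted.
+ */
+static inline u32 radeon_irq_ack_sketch(void __iomem *mmio)
+{
+	u32 status = readl(mmio + RADEON_GEN_INT_STATUS);
+
+	if (status)
+		writel(status, mmio + RADEON_GEN_INT_STATUS);	/* ack */
+	return status;	/* test e.g. RADEON_CRTC_VBLANK_STAT */
+}
+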
+#define RADEON_GENENB                       0x03c3 /* VGA */
+#define RADEON_GENFC_RD                     0x03ca /* VGA */
+#define RADEON_GENFC_WT                     0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD                     0x03cc /* VGA */
+#define RADEON_GENMO_WT                     0x03c2 /* VGA */
+#define RADEON_GENS0                        0x03c2 /* VGA */
+#define RADEON_GENS1                        0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID                   0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB                  0x006c
+#define RADEON_GPIO_CRT2_DDC                0x006c
+#define RADEON_GPIO_DVI_DDC                 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC                 0x0060 /* DDC1 */
+#       define RADEON_GPIO_A_0              (1 <<  0)
+#       define RADEON_GPIO_A_1              (1 <<  1)
+#       define RADEON_GPIO_Y_0              (1 <<  8)
+#       define RADEON_GPIO_Y_1              (1 <<  9)
+#       define RADEON_GPIO_Y_SHIFT_0        8
+#       define RADEON_GPIO_Y_SHIFT_1        9
+#       define RADEON_GPIO_EN_0             (1 << 16)
+#       define RADEON_GPIO_EN_1             (1 << 17)
+#       define RADEON_GPIO_MASK_0           (1 << 24) /*??*/
+#       define RADEON_GPIO_MASK_1           (1 << 25) /*??*/
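+
+/*
+ * Editor's note: illustrative sketch of the bit-banged DDC scheme the
+ * bits above imply: setting an EN bit drives that line low (the A
+ * latch holds 0), clearing EN releases it to the pull-up, and the Y
+ * bits sample the pins. Treating line 0 as SDA is an assumption here;
+ * the clock/data assignment varies per pad.
+ */
+static inline int radeon_ddc_get_sda_sketch(void __iomem *mmio, u32 ddc_reg)
+{
+	/* release the data line, then sample it */
+	writel(readl(mmio + ddc_reg) & ~(RADEON_GPIO_EN_0 | RADEON_GPIO_A_0),
+	       mmio + ddc_reg);
+	return !!(readl(mmio + ddc_reg) & RADEON_GPIO_Y_0);
+}
+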
+#define RADEON_GRPH8_DATA                   0x03cf /* VGA */
+#define RADEON_GRPH8_IDX                    0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0             0x15e0
+#define RADEON_GUI_SCRATCH_REG1             0x15e4
+#define RADEON_GUI_SCRATCH_REG2             0x15e8
+#define RADEON_GUI_SCRATCH_REG3             0x15ec
+#define RADEON_GUI_SCRATCH_REG4             0x15f0
+#define RADEON_GUI_SCRATCH_REG5             0x15f4
+
+#define RADEON_HEADER                       0x0f0e /* PCI */
+#define RADEON_HOST_DATA0                   0x17c0
+#define RADEON_HOST_DATA1                   0x17c4
+#define RADEON_HOST_DATA2                   0x17c8
+#define RADEON_HOST_DATA3                   0x17cc
+#define RADEON_HOST_DATA4                   0x17d0
+#define RADEON_HOST_DATA5                   0x17d4
+#define RADEON_HOST_DATA6                   0x17d8
+#define RADEON_HOST_DATA7                   0x17dc
+#define RADEON_HOST_DATA_LAST               0x17e0
+#define RADEON_HOST_PATH_CNTL               0x0130
+#	define RADEON_HP_LIN_RD_CACHE_DIS   (1 << 24)
+#	define RADEON_HDP_READ_BUFFER_INVALIDATE   (1 << 27)
+#       define RADEON_HDP_SOFT_RESET        (1 << 26)
+#       define RADEON_HDP_APER_CNTL         (1 << 23)
+#define RADEON_HTOTAL_CNTL                  0x0009 /* PLL */
+#       define RADEON_HTOT_CNTL_VGA_EN      (1 << 28)
+#define RADEON_HTOTAL2_CNTL                 0x002e /* PLL */
+
+       /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0		    0x0090
+#       define RADEON_I2C_DONE              (1 << 0)
+#       define RADEON_I2C_NACK              (1 << 1)
+#       define RADEON_I2C_HALT              (1 << 2)
+#       define RADEON_I2C_SOFT_RST          (1 << 5)
+#       define RADEON_I2C_DRIVE_EN          (1 << 6)
+#       define RADEON_I2C_DRIVE_SEL         (1 << 7)
+#       define RADEON_I2C_START             (1 << 8)
+#       define RADEON_I2C_STOP              (1 << 9)
+#       define RADEON_I2C_RECEIVE           (1 << 10)
+#       define RADEON_I2C_ABORT             (1 << 11)
+#       define RADEON_I2C_GO                (1 << 12)
+#       define RADEON_I2C_PRESCALE_SHIFT    16
+#define RADEON_I2C_CNTL_1                   0x0094
+#       define RADEON_I2C_DATA_COUNT_SHIFT  0
+#       define RADEON_I2C_ADDR_COUNT_SHIFT  4
+#       define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
+#       define RADEON_I2C_SEL               (1 << 16)
+#       define RADEON_I2C_EN                (1 << 17)
+#       define RADEON_I2C_TIME_LIMIT_SHIFT  24
+#define RADEON_I2C_DATA			    0x0098
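+
+/*
+ * Editor's note: illustrative outline of a hardware-engine transfer as
+ * the bits above suggest: program the prescaler and byte counts, set
+ * RADEON_I2C_GO, then wait for DONE and check NACK/HALT. The exact
+ * sequence varies by asic; this polling tail is a hedged sketch and
+ * assumes the usual kernel headers for the errno values.
+ */
+static inline int radeon_i2c_wait_done_sketch(void __iomem *mmio)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++) {
+		u32 cntl = readl(mmio + RADEON_I2C_CNTL_0);
+
+		if (cntl & RADEON_I2C_DONE)
+			return (cntl & RADEON_I2C_NACK) ? -EIO : 0;
+	}
+	return -ETIMEDOUT;	/* caller would set RADEON_I2C_ABORT */
+}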
+
+#define RADEON_DVI_I2C_CNTL_0		    0x02e0
+#       define R200_DVI_I2C_PIN_SEL(x)      ((x) << 3)
+#       define R200_SEL_DDC1                0 /* depends on asic */
+#       define R200_SEL_DDC2                1 /* depends on asic */
+#       define R200_SEL_DDC3                2 /* depends on asic */
+#	define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+#	define RADEON_SW_CAN_USE_DVI_I2C      (1 << 13)
+#	define RADEON_SW_DONE_USING_DVI_I2C   (1 << 14)
+#	define RADEON_HW_NEEDS_DVI_I2C        (1 << 14)
+#	define RADEON_ABORT_HW_DVI_I2C        (1 << 15)
+#	define RADEON_HW_USING_DVI_I2C        (1 << 15)
+#define RADEON_DVI_I2C_CNTL_1               0x02e4
+#define RADEON_DVI_I2C_DATA		    0x02e8
+
+#define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN                0x0f3d /* PCI */
+#define RADEON_IO_BASE                      0x0f14 /* PCI */
+
+#define RADEON_LATENCY                      0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC                0x1608
+#define RADEON_LEAD_BRES_LNTH               0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB           0x1624
+#define RADEON_LVDS_GEN_CNTL                0x02d0
+#       define RADEON_LVDS_ON               (1   <<  0)
+#       define RADEON_LVDS_DISPLAY_DIS      (1   <<  1)
+#       define RADEON_LVDS_PANEL_TYPE       (1   <<  2)
+#       define RADEON_LVDS_PANEL_FORMAT     (1   <<  3)
+#       define RADEON_LVDS_NO_FM            (0   <<  4)
+#       define RADEON_LVDS_2_GREY           (1   <<  4)
+#       define RADEON_LVDS_4_GREY           (2   <<  4)
+#       define RADEON_LVDS_RST_FM           (1   <<  6)
+#       define RADEON_LVDS_EN               (1   <<  7)
+#       define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+#       define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+#       define RADEON_LVDS_BL_MOD_EN        (1   << 16)
+#       define RADEON_LVDS_BL_CLK_SEL       (1   << 17)
+#       define RADEON_LVDS_DIGON            (1   << 18)
+#       define RADEON_LVDS_BLON             (1   << 19)
+#       define RADEON_LVDS_FP_POL_LOW       (1   << 20)
+#       define RADEON_LVDS_LP_POL_LOW       (1   << 21)
+#       define RADEON_LVDS_DTM_POL_LOW      (1   << 22)
+#       define RADEON_LVDS_SEL_CRTC2        (1   << 23)
+#       define RADEON_LVDS_FPDI_EN          (1   << 27)
+#       define RADEON_LVDS_HSYNC_DELAY_SHIFT        28
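+
+/*
+ * Editor's note: illustrative sketch. The backlight level is a plain
+ * 8-bit field, so adjusting it is the usual read-modify-write over the
+ * mask and shift above; hypothetical helper returning the new value:
+ */
+static inline u32 radeon_lvds_set_bl_level_sketch(u32 lvds_gen_cntl, u8 level)
+{
+	lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_LEVEL_MASK;
+	lvds_gen_cntl |= (u32)level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT;
+	return lvds_gen_cntl | RADEON_LVDS_BL_MOD_EN;
+}
+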
+#define RADEON_LVDS_PLL_CNTL                0x02d4
+#       define RADEON_HSYNC_DELAY_SHIFT     28
+#       define RADEON_HSYNC_DELAY_MASK      (0xf << 28)
+#       define RADEON_LVDS_PLL_EN           (1   << 16)
+#       define RADEON_LVDS_PLL_RESET        (1   << 17)
+#       define R300_LVDS_SRC_SEL_MASK       (3   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC1      (0   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC2      (1   << 18)
+#       define R300_LVDS_SRC_SEL_RMX        (2   << 18)
+#define RADEON_LVDS_SS_GEN_CNTL             0x02ec
+#       define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT     16
+#       define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT     20
+
+#define RADEON_MAX_LATENCY                  0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR            0x23c
+#define RADEON_DISPLAY2_BASE_ADDR           0x33c
+#define RADEON_OV0_BASE_ADDR                0x43c
+#define RADEON_NB_TOM                       0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER         0x180
+#       define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+#       define R300_MC_DISP0R_INIT_LAT_MASK  0xf
+#       define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+#       define R300_MC_DISP1R_INIT_LAT_MASK  0xf
+#define RADEON_MCLK_CNTL                    0x0012 /* PLL */
+#       define RADEON_MCLKA_SRC_SEL_MASK    0x7
+#       define RADEON_FORCEON_MCLKA         (1 << 16)
+#       define RADEON_FORCEON_MCLKB         (1 << 17)
+#       define RADEON_FORCEON_YCLKA         (1 << 18)
+#       define RADEON_FORCEON_YCLKB         (1 << 19)
+#       define RADEON_FORCEON_MC            (1 << 20)
+#       define RADEON_FORCEON_AIC           (1 << 21)
+#       define R300_DISABLE_MC_MCLKA        (1 << 21)
+#       define R300_DISABLE_MC_MCLKB        (1 << 21)
+#define RADEON_MCLK_MISC                    0x001f /* PLL */
+#       define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+#       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+#       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
+#       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
+
+#define RADEON_GPIOPAD_MASK                 0x0198
+#define RADEON_GPIOPAD_A		    0x019c
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_GPIOPAD_Y                    0x01a4
+#define RADEON_MDGPIO_MASK                  0x01a8
+#define RADEON_MDGPIO_A                     0x01ac
+#define RADEON_MDGPIO_EN                    0x01b0
+#define RADEON_MDGPIO_Y                     0x01b4
+
+#define RADEON_MEM_ADDR_CONFIG              0x0148
+#define RADEON_MEM_BASE                     0x0f10 /* PCI */
+#define RADEON_MEM_CNTL                     0x0140
+#       define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+#       define RADEON_MEM_USE_B_CH_ONLY     (1 <<  1)
+#       define RV100_HALF_MODE              (1 <<  3)
+#       define R300_MEM_NUM_CHANNELS_MASK   0x03
+#       define R300_MEM_USE_CD_CH_ONLY      (1 <<  2)
+#define RADEON_MEM_TIMING_CNTL              0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER           0x0154
+#define RADEON_MEM_INTF_CNTL                0x014c
+#define RADEON_MEM_SDRAM_MODE_REG           0x0158
+#       define RADEON_SDRAM_MODE_MASK       0xffff0000
+#       define RADEON_B3MEM_RESET_MASK      0x6fffffff
+#       define RADEON_MEM_CFG_TYPE_DDR      (1 << 30)
+#define RADEON_MEM_STR_CNTL                 0x0150
+#       define RADEON_MEM_PWRUP_COMPL_A     (1 <<  0)
+#       define RADEON_MEM_PWRUP_COMPL_B     (1 <<  1)
+#       define R300_MEM_PWRUP_COMPL_C       (1 <<  2)
+#       define R300_MEM_PWRUP_COMPL_D       (1 <<  3)
+#       define RADEON_MEM_PWRUP_COMPLETE    0x03
+#       define R300_MEM_PWRUP_COMPLETE      0x0f
+#define RADEON_MC_STATUS                    0x0150
+#       define RADEON_MC_IDLE               (1 << 2)
+#       define R300_MC_IDLE                 (1 << 4)
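+
+/*
+ * Editor's note: illustrative sketch. Memory-controller quiescence is
+ * polled through RADEON_MC_STATUS, with the idle bit moving on
+ * R300-class parts. Hypothetical poll loop; the timeout bound and
+ * udelay() from <linux/delay.h> are assumptions.
+ */
+static inline int radeon_mc_wait_for_idle_sketch(void __iomem *mmio,
+						 bool is_r300)
+{
+	u32 idle = is_r300 ? R300_MC_IDLE : RADEON_MC_IDLE;
+	unsigned int i;
+
+	for (i = 0; i < 10000; i++) {
+		if (readl(mmio + RADEON_MC_STATUS) & idle)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+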
+#define RADEON_MEM_VGA_RP_SEL               0x003c
+#define RADEON_MEM_VGA_WP_SEL               0x0038
+#define RADEON_MIN_GRANT                    0x0f3e /* PCI */
+#define RADEON_MM_DATA                      0x0004
+#define RADEON_MM_INDEX                     0x0000
+#	define RADEON_MM_APER		(1 << 31)
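+
+/*
+ * Editor's note: illustrative sketch. MM_INDEX/MM_DATA form the
+ * indirect path to registers that fall outside the directly mapped
+ * window: write the offset to MM_INDEX, then transfer through
+ * MM_DATA. The driver's slow-path register accessors work this way;
+ * locking is omitted here.
+ */
+static inline u32 radeon_mm_rreg_indirect_sketch(void __iomem *mmio, u32 reg)
+{
+	writel(reg, mmio + RADEON_MM_INDEX);
+	return readl(mmio + RADEON_MM_DATA);
+}
+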
+#define RADEON_MPLL_CNTL                    0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG                0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG                0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1                 0x01c0
+#       define RADEON_SCK_PRESCALE_SHIFT    24
+#       define RADEON_SCK_PRESCALE_MASK     (0xff << 24)
+#define R300_MC_IND_INDEX                   0x01f8
+#       define R300_MC_IND_ADDR_MASK        0x3f
+#       define R300_MC_IND_WR_EN            (1 << 8)
+#define R300_MC_IND_DATA                    0x01fc
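+
+/*
+ * Editor's note: illustrative sketch. R300 memory-controller registers
+ * sit behind their own index/data pair; a write additionally needs
+ * R300_MC_IND_WR_EN set in the index word. Hypothetical helper,
+ * locking omitted:
+ */
+static inline void r300_mc_wreg_sketch(void __iomem *mmio, u32 addr, u32 val)
+{
+	writel((addr & R300_MC_IND_ADDR_MASK) | R300_MC_IND_WR_EN,
+	       mmio + R300_MC_IND_INDEX);
+	writel(val, mmio + R300_MC_IND_DATA);
+}
+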
+#define R300_MC_READ_CNTL_AB                0x017c
+#       define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind	    0x24
+#       define R300_MEM_RBS_POSITION_C_MASK 0x03
+
+#define RADEON_N_VIF_COUNT                  0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL           0x0470
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM        0x00000007
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD   0x00000008
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD        0x00000010
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE     0x00000040
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT     0x00000300
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN  0x00010000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN     0x00040000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN      0x00080000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE    0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL              0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN      0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ           0x0408
+#       define  RADEON_EXCL_HORZ_START_MASK        0x000000ff
+#       define  RADEON_EXCL_HORZ_END_MASK          0x0000ff00
+#       define  RADEON_EXCL_HORZ_BACK_PORCH_MASK   0x00ff0000
+#       define  RADEON_EXCL_HORZ_EXCLUSIVE_EN      0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT           0x040C
+#       define  RADEON_EXCL_VERT_START_MASK        0x000003ff
+#       define  RADEON_EXCL_VERT_END_MASK          0x03ff0000
+#define RADEON_OV0_FILTER_CNTL              0x04A0
+#       define RADEON_FILTER_PROGRAMMABLE_COEF            0x0
+#       define RADEON_FILTER_HC_COEF_HORZ_Y               0x1
+#       define RADEON_FILTER_HC_COEF_HORZ_UV              0x2
+#       define RADEON_FILTER_HC_COEF_VERT_Y               0x4
+#       define RADEON_FILTER_HC_COEF_VERT_UV              0x8
+#       define RADEON_FILTER_HARDCODED_COEF               0xf
+#       define RADEON_FILTER_COEF_MASK                    0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0          0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1          0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2          0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3          0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4          0x04C0
+#define RADEON_OV0_FLAG_CNTL                0x04DC
+#define RADEON_OV0_GAMMA_000_00F            0x0d40
+#define RADEON_OV0_GAMMA_010_01F            0x0d44
+#define RADEON_OV0_GAMMA_020_03F            0x0d48
+#define RADEON_OV0_GAMMA_040_07F            0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF            0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF            0x0e04
+#define RADEON_OV0_GAMMA_100_13F            0x0e08
+#define RADEON_OV0_GAMMA_140_17F            0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF            0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF            0x0e14
+#define RADEON_OV0_GAMMA_200_23F            0x0e18
+#define RADEON_OV0_GAMMA_240_27F            0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF            0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF            0x0e24
+#define RADEON_OV0_GAMMA_300_33F            0x0e28
+#define RADEON_OV0_GAMMA_340_37F            0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF            0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF            0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW     0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH    0x04F0
+#define RADEON_OV0_H_INC                    0x0480
+#define RADEON_OV0_KEY_CNTL                 0x04F4
+#       define  RADEON_VIDEO_KEY_FN_MASK    0x00000003L
+#       define  RADEON_VIDEO_KEY_FN_FALSE   0x00000000L
+#       define  RADEON_VIDEO_KEY_FN_TRUE    0x00000001L
+#       define  RADEON_VIDEO_KEY_FN_EQ      0x00000002L
+#       define  RADEON_VIDEO_KEY_FN_NE      0x00000003L
+#       define  RADEON_GRAPHIC_KEY_FN_MASK  0x00000030L
+#       define  RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+#       define  RADEON_GRAPHIC_KEY_FN_TRUE  0x00000010L
+#       define  RADEON_GRAPHIC_KEY_FN_EQ    0x00000020L
+#       define  RADEON_GRAPHIC_KEY_FN_NE    0x00000030L
+#       define  RADEON_CMP_MIX_MASK         0x00000100L
+#       define  RADEON_CMP_MIX_OR           0x00000000L
+#       define  RADEON_CMP_MIX_AND          0x00000100L
+#define RADEON_OV0_LIN_TRANS_A              0x0d20
+#define RADEON_OV0_LIN_TRANS_B              0x0d24
+#define RADEON_OV0_LIN_TRANS_C              0x0d28
+#define RADEON_OV0_LIN_TRANS_D              0x0d2c
+#define RADEON_OV0_LIN_TRANS_E              0x0d30
+#define RADEON_OV0_LIN_TRANS_F              0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP    0x0430
+#       define  RADEON_P1_BLNK_LN_AT_TOP_M1_MASK   0x00000fffL
+#       define  RADEON_P1_ACTIVE_LINES_M1          0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT          0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT          0x0428
+#       define  RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+#       define  RADEON_OV0_P1_V_ACCUM_INIT_MASK    0x01ff8000L
+#define RADEON_OV0_P1_X_START_END           0x0494
+#define RADEON_OV0_P2_X_START_END           0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP   0x0434
+#       define  RADEON_P23_BLNK_LN_AT_TOP_M1_MASK  0x000007ffL
+#       define  RADEON_P23_ACTIVE_LINES_M1         0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT         0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT         0x042C
+#define RADEON_OV0_P3_X_START_END           0x049C
+#define RADEON_OV0_REG_LOAD_CNTL            0x0410
+#       define  RADEON_REG_LD_CTL_LOCK                 0x00000001L
+#       define  RADEON_REG_LD_CTL_VBLANK_DURING_LOCK   0x00000002L
+#       define  RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+#       define  RADEON_REG_LD_CTL_LOCK_READBACK        0x00000008L
+#       define  RADEON_REG_LD_CTL_FLIP_READBACK        0x00000010L
+#define RADEON_OV0_SCALE_CNTL               0x0420
+#       define  RADEON_SCALER_HORZ_PICK_NEAREST    0x00000004L
+#       define  RADEON_SCALER_VERT_PICK_NEAREST    0x00000008L
+#       define  RADEON_SCALER_SIGNED_UV            0x00000010L
+#       define  RADEON_SCALER_GAMMA_SEL_MASK       0x00000060L
+#       define  RADEON_SCALER_GAMMA_SEL_BRIGHT     0x00000000L
+#       define  RADEON_SCALER_GAMMA_SEL_G22        0x00000020L
+#       define  RADEON_SCALER_GAMMA_SEL_G18        0x00000040L
+#       define  RADEON_SCALER_GAMMA_SEL_G14        0x00000060L
+#       define  RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+#       define  RADEON_SCALER_SURFAC_FORMAT        0x00000f00L
+#       define  RADEON_SCALER_SOURCE_15BPP         0x00000300L
+#       define  RADEON_SCALER_SOURCE_16BPP         0x00000400L
+#       define  RADEON_SCALER_SOURCE_32BPP         0x00000600L
+#       define  RADEON_SCALER_SOURCE_YUV9          0x00000900L
+#       define  RADEON_SCALER_SOURCE_YUV12         0x00000A00L
+#       define  RADEON_SCALER_SOURCE_VYUY422       0x00000B00L
+#       define  RADEON_SCALER_SOURCE_YVYU422       0x00000C00L
+#       define  RADEON_SCALER_ADAPTIVE_DEINT       0x00001000L
+#       define  RADEON_SCALER_TEMPORAL_DEINT       0x00002000L
+#       define  RADEON_SCALER_CRTC_SEL             0x00004000L
+#       define  RADEON_SCALER_SMART_SWITCH         0x00008000L
+#       define  RADEON_SCALER_BURST_PER_PLANE      0x007F0000L
+#       define  RADEON_SCALER_DOUBLE_BUFFER        0x01000000L
+#       define  RADEON_SCALER_DIS_LIMIT            0x08000000L
+#       define  RADEON_SCALER_LIN_TRANS_BYPASS     0x10000000L
+#       define  RADEON_SCALER_INT_EMU              0x20000000L
+#       define  RADEON_SCALER_ENABLE               0x40000000L
+#       define  RADEON_SCALER_SOFT_RESET           0x80000000L
+#define RADEON_OV0_STEP_BY                  0x0484
+#define RADEON_OV0_TEST                     0x04F8
+#define RADEON_OV0_V_INC                    0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE     0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE     0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS       0x0440
+#       define  RADEON_VIF_BUF0_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF0_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF0_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS       0x0444
+#       define  RADEON_VIF_BUF1_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF1_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF1_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS       0x0448
+#       define  RADEON_VIF_BUF2_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF2_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF2_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS       0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS       0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS       0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH       0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW        0x04E4
+#define RADEON_OV0_Y_X_START                0x0400
+#define RADEON_OV0_Y_X_END                  0x0404
+#define RADEON_OV1_Y_X_START                0x0600
+#define RADEON_OV1_Y_X_END                  0x0604
+#define RADEON_OVR_CLR                      0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT           0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+#define RADEON_OVR2_CLR                     0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
+
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET           0x0920
+#define RADEON_CAP0_BUF1_OFFSET           0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET      0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET      0x092C
+
+#define RADEON_CAP0_BUF_PITCH             0x0930
+#define RADEON_CAP0_V_WINDOW              0x0934
+#define RADEON_CAP0_H_WINDOW              0x0938
+#define RADEON_CAP0_VBI0_OFFSET           0x093C
+#define RADEON_CAP0_VBI1_OFFSET           0x0940
+#define RADEON_CAP0_VBI_V_WINDOW          0x0944
+#define RADEON_CAP0_VBI_H_WINDOW          0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL        0x094C
+#define RADEON_CAP0_TRIG_CNTL             0x0950
+#define RADEON_CAP0_DEBUG                 0x0954
+#define RADEON_CAP0_CONFIG                0x0958
+#       define RADEON_CAP0_CONFIG_CONTINUOS          0x00000001
+#       define RADEON_CAP0_CONFIG_START_FIELD_EVEN   0x00000002
+#       define RADEON_CAP0_CONFIG_START_BUF_GET      0x00000004
+#       define RADEON_CAP0_CONFIG_START_BUF_SET      0x00000008
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_ALT       0x00000010
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME     0x00000020
+#       define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+#       define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE    0x00000080
+#       define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE    0x00000100
+#       define RADEON_CAP0_CONFIG_MIRROR_EN          0x00000200
+#       define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN  0x00000400
+#       define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV    0x00000800
+#       define RADEON_CAP0_CONFIG_ANC_DECODE_EN      0x00001000
+#       define RADEON_CAP0_CONFIG_VBI_EN             0x00002000
+#       define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN  0x00004000
+#       define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+#       define RADEON_CAP0_CONFIG_FAKE_FIELD_EN      0x00010000
+#       define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE  0x00020000
+#       define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2      0x00080000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4      0x00100000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_2      0x00200000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_4      0x00400000
+#       define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE   0x00000000
+#       define RADEON_CAP0_CONFIG_FORMAT_CCIR656     0x00800000
+#       define RADEON_CAP0_CONFIG_FORMAT_ZV          0x01000000
+#       define RADEON_CAP0_CONFIG_FORMAT_VIP         0x01800000
+#       define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT   0x02000000
+#       define RADEON_CAP0_CONFIG_HORZ_DECIMATOR     0x04000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422   0x00000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422   0x20000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_2       0x40000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_4       0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET        0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET       0x0960
+#define RADEON_CAP0_ANC_H_WINDOW          0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST       0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET    0x096C
+#define RADEON_CAP0_BUF_STATUS            0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO       0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS                 0x097C */
+#define RADEON_CAP0_VBI2_OFFSET           0x0980
+#define RADEON_CAP0_VBI3_OFFSET           0x0984
+#define RADEON_CAP0_ANC2_OFFSET           0x0988
+#define RADEON_CAP0_ANC3_OFFSET           0x098C
+#define RADEON_VID_BUFFER_CONTROL         0x0900
+
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET           0x0990
+#define RADEON_CAP1_BUF1_OFFSET           0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET      0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET      0x099C
+
+#define RADEON_CAP1_BUF_PITCH             0x09A0
+#define RADEON_CAP1_V_WINDOW              0x09A4
+#define RADEON_CAP1_H_WINDOW              0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET        0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET       0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW                  0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW                  0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL        0x09BC
+#define RADEON_CAP1_TRIG_CNTL             0x09C0
+#define RADEON_CAP1_DEBUG                         0x09C4
+#define RADEON_CAP1_CONFIG                0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET        0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET       0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW                  0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST       0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET    0x09DC
+#define RADEON_CAP1_BUF_STATUS            0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO                  0x09E8
+#define RADEON_CAP1_XSHARPNESS            0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS                  0x1F80
+#define RADEON_IDCT_LEVELS                0x1F84
+#define RADEON_IDCT_CONTROL               0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL          0x1F88
+#define RADEON_IDCT_AUTH                  0x1F8C
+
+#define RADEON_P2PLL_CNTL                   0x002a /* P2PLL */
+#       define RADEON_P2PLL_RESET                (1 <<  0)
+#       define RADEON_P2PLL_SLEEP                (1 <<  1)
+#       define RADEON_P2PLL_PVG_MASK             (7 << 11)
+#       define RADEON_P2PLL_PVG_SHIFT            11
+#       define RADEON_P2PLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_P2PLL_DIV_0                  0x002c
+#       define RADEON_P2PLL_FB0_DIV_MASK    0x07ff
+#       define RADEON_P2PLL_POST0_DIV_MASK  0x00070000
+#define RADEON_P2PLL_REF_DIV                0x002b /* PLL */
+#       define RADEON_P2PLL_REF_DIV_MASK    0x03ff
+#       define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+#       define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#       define R300_PPLL_REF_DIV_ACC_MASK   (0x3ff << 18)
+#       define R300_PPLL_REF_DIV_ACC_SHIFT  18
+#define RADEON_PALETTE_DATA                 0x00b4
+#define RADEON_PALETTE_30_DATA              0x00b8
+#define RADEON_PALETTE_INDEX                0x00b0
+#define RADEON_PCI_GART_PAGE                0x017c
+#define RADEON_PIXCLKS_CNTL                 0x002d
+#       define RADEON_PIX2CLK_SRC_SEL_MASK     0x03
+#       define RADEON_PIX2CLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_PIX2CLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+#       define RADEON_PIX2CLK_ALWAYS_ONb       (1<<6)
+#       define RADEON_PIX2CLK_DAC_ALWAYS_ONb   (1<<7)
+#       define RADEON_PIXCLK_TV_SRC_SEL        (1 << 8)
+#       define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+#       define R300_DVOCLK_ALWAYS_ONb          (1 << 10)
+#       define RADEON_PIXCLK_BLEND_ALWAYS_ONb  (1 << 11)
+#       define RADEON_PIXCLK_GV_ALWAYS_ONb     (1 << 12)
+#       define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+#       define R300_PIXCLK_DVO_ALWAYS_ONb      (1 << 13)
+#       define RADEON_PIXCLK_LVDS_ALWAYS_ONb   (1 << 14)
+#       define RADEON_PIXCLK_TMDS_ALWAYS_ONb   (1 << 15)
+#       define R300_PIXCLK_TRANS_ALWAYS_ONb    (1 << 16)
+#       define R300_PIXCLK_TVO_ALWAYS_ONb      (1 << 17)
+#       define R300_P2G2CLK_ALWAYS_ONb         (1 << 18)
+#       define R300_P2G2CLK_DAC_ALWAYS_ONb     (1 << 19)
+#       define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C              0x1d44
+#define RADEON_PLL_TEST_CNTL                0x0013 /* PLL */
+#       define RADEON_PLL_MASK_READ_B          (1 << 9)
+#define RADEON_PMI_CAP_ID                   0x0f5c /* PCI */
+#define RADEON_PMI_DATA                     0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR              0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG                  0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG                0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER                 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL                    0x0002 /* PLL */
+#       define RADEON_PPLL_RESET                (1 <<  0)
+#       define RADEON_PPLL_SLEEP                (1 <<  1)
+#       define RADEON_PPLL_PVG_MASK             (7 << 11)
+#       define RADEON_PPLL_PVG_SHIFT            11
+#       define RADEON_PPLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_PPLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_PPLL_DIV_0                   0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1                   0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2                   0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3                   0x0007 /* PLL */
+#       define RADEON_PPLL_FB3_DIV_MASK     0x07ff
+#       define RADEON_PPLL_POST3_DIV_MASK   0x00070000
+#define RADEON_PPLL_REF_DIV                 0x0003 /* PLL */
+#       define RADEON_PPLL_REF_DIV_MASK     0x03ff
+#       define RADEON_PPLL_ATOMIC_UPDATE_R  (1 << 15) /* same as _W */
+#       define RADEON_PPLL_ATOMIC_UPDATE_W  (1 << 15) /* same as _R */
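+
+/*
+ * Editor's note: illustrative sketch. With the reference, feedback and
+ * post dividers above, the PLL output follows the usual relation
+ *
+ *     out = ref_freq * fb_div / (ref_div * post_div)
+ *
+ * and new divider values are handed over via the ATOMIC_UPDATE
+ * handshake (write _W, wait for _R to clear). Hypothetical helper:
+ */
+static inline u32 radeon_pll_out_khz_sketch(u32 ref_freq_khz, u32 ref_div,
+					    u32 fb_div, u32 post_div)
+{
+	return (ref_freq_khz * fb_div) / (ref_div * post_div);
+}
+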
+#define RADEON_PWR_MNGMT_CNTL_STATUS        0x0f60 /* PCI */
+
+#define RADEON_RBBM_GUICNTL                 0x172c
+#       define RADEON_HOST_DATA_SWAP_NONE   (0 << 0)
+#       define RADEON_HOST_DATA_SWAP_16BIT  (1 << 0)
+#       define RADEON_HOST_DATA_SWAP_32BIT  (2 << 0)
+#       define RADEON_HOST_DATA_SWAP_HDW    (3 << 0)
+#define RADEON_RBBM_SOFT_RESET              0x00f0
+#       define RADEON_SOFT_RESET_CP         (1 <<  0)
+#       define RADEON_SOFT_RESET_HI         (1 <<  1)
+#       define RADEON_SOFT_RESET_SE         (1 <<  2)
+#       define RADEON_SOFT_RESET_RE         (1 <<  3)
+#       define RADEON_SOFT_RESET_PP         (1 <<  4)
+#       define RADEON_SOFT_RESET_E2         (1 <<  5)
+#       define RADEON_SOFT_RESET_RB         (1 <<  6)
+#       define RADEON_SOFT_RESET_HDP        (1 <<  7)
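+
+/*
+ * Editor's note: illustrative outline. Engine soft reset is pulsed:
+ * set the per-block bit, read back to post the write, give the reset
+ * time to settle, then clear the bit again. The delay length and the
+ * choice of the CP block are assumptions in this sketch.
+ */
+static inline void radeon_soft_reset_cp_sketch(void __iomem *mmio)
+{
+	u32 tmp = readl(mmio + RADEON_RBBM_SOFT_RESET);
+
+	writel(tmp | RADEON_SOFT_RESET_CP, mmio + RADEON_RBBM_SOFT_RESET);
+	(void)readl(mmio + RADEON_RBBM_SOFT_RESET);
+	udelay(200);
+	writel(tmp, mmio + RADEON_RBBM_SOFT_RESET);
+	(void)readl(mmio + RADEON_RBBM_SOFT_RESET);
+}
+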
+#define RADEON_RBBM_STATUS                  0x0e40
+#       define RADEON_RBBM_FIFOCNT_MASK     0x007f
+#       define RADEON_RBBM_ACTIVE           (1 << 31)
+#define RADEON_RB2D_DSTCACHE_CTLSTAT        0x342c
+#       define RADEON_RB2D_DC_FLUSH         (3 << 0)
+#       define RADEON_RB2D_DC_FREE          (3 << 2)
+#       define RADEON_RB2D_DC_FLUSH_ALL     0xf
+#       define RADEON_RB2D_DC_BUSY          (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE           0x3428
+#define RADEON_DSTCACHE_CTLSTAT             0x1714
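+
+/*
+ * Editor's note: illustrative sketch. Flushing the 2D destination
+ * cache is a write of the FLUSH_ALL code followed by polling the BUSY
+ * bit; the iteration bound is an assumption.
+ */
+static inline int radeon_rb2d_flush_sketch(void __iomem *mmio)
+{
+	unsigned int i;
+
+	writel(RADEON_RB2D_DC_FLUSH_ALL, mmio + RADEON_RB2D_DSTCACHE_CTLSTAT);
+	for (i = 0; i < 10000; i++) {
+		if (!(readl(mmio + RADEON_RB2D_DSTCACHE_CTLSTAT) &
+		      RADEON_RB2D_DC_BUSY))
+			return 0;
+	}
+	return -ETIMEDOUT;
+}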
+
+#define RADEON_RB3D_ZCACHE_MODE             0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT          0x3254
+#       define RADEON_RB3D_ZC_FLUSH_ALL     0x5
+#define RADEON_RB3D_DSTCACHE_MODE           0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE            (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE        (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE        (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE           (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128   (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128   (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH      (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH      (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE         (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE         (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW               (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL         (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ         (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT        0x325c
+#       define RADEON_RB3D_DC_FLUSH         (3 << 0)
+#       define RADEON_RB3D_DC_FREE          (3 << 2)
+#       define RADEON_RB3D_DC_FLUSH_ALL     0xf
+#       define RADEON_RB3D_DC_BUSY          (1 << 31)
+
+#define RADEON_REG_BASE                     0x0f18 /* PCI */
+#define RADEON_REGPROG_INF                  0x0f09 /* PCI */
+#define RADEON_REVISION_ID                  0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM                    0x164c
+#define RADEON_SC_BOTTOM_RIGHT              0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C            0x1c8c
+#define RADEON_SC_LEFT                      0x1640
+#define RADEON_SC_RIGHT                     0x1644
+#define RADEON_SC_TOP                       0x1648
+#define RADEON_SC_TOP_LEFT                  0x16ec
+#define RADEON_SC_TOP_LEFT_C                0x1c88
+#       define RADEON_SC_SIGN_MASK_LO       0x8000
+#       define RADEON_SC_SIGN_MASK_HI       0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV            0x000a /* PLL */
+#       define RADEON_M_SPLL_REF_DIV_SHIFT  0
+#       define RADEON_M_SPLL_REF_DIV_MASK   0xff
+#       define RADEON_MPLL_FB_DIV_SHIFT     8
+#       define RADEON_MPLL_FB_DIV_MASK      0xff
+#       define RADEON_SPLL_FB_DIV_SHIFT     16
+#       define RADEON_SPLL_FB_DIV_MASK      0xff
+#define RADEON_SPLL_CNTL                    0x000c /* PLL */
+#       define RADEON_SPLL_SLEEP            (1 << 0)
+#       define RADEON_SPLL_RESET            (1 << 1)
+#       define RADEON_SPLL_PCP_MASK         0x7
+#       define RADEON_SPLL_PCP_SHIFT        8
+#       define RADEON_SPLL_PVG_MASK         0x7
+#       define RADEON_SPLL_PVG_SHIFT        11
+#       define RADEON_SPLL_PDC_MASK         0x3
+#       define RADEON_SPLL_PDC_SHIFT        14
+#define RADEON_SCLK_CNTL                    0x000d /* PLL */
+#       define RADEON_SCLK_SRC_SEL_MASK     0x0007
+#       define RADEON_DYN_STOP_LAT_MASK     0x00007ff8
+#       define RADEON_CP_MAX_DYN_STOP_LAT   0x0008
+#       define RADEON_SCLK_FORCEON_MASK     0xffff8000
+#       define RADEON_SCLK_FORCE_DISP2      (1<<15)
+#       define RADEON_SCLK_FORCE_CP         (1<<16)
+#       define RADEON_SCLK_FORCE_HDP        (1<<17)
+#       define RADEON_SCLK_FORCE_DISP1      (1<<18)
+#       define RADEON_SCLK_FORCE_TOP        (1<<19)
+#       define RADEON_SCLK_FORCE_E2         (1<<20)
+#       define RADEON_SCLK_FORCE_SE         (1<<21)
+#       define RADEON_SCLK_FORCE_IDCT       (1<<22)
+#       define RADEON_SCLK_FORCE_VIP        (1<<23)
+#       define RADEON_SCLK_FORCE_RE         (1<<24)
+#       define RADEON_SCLK_FORCE_PB         (1<<25)
+#       define RADEON_SCLK_FORCE_TAM        (1<<26)
+#       define RADEON_SCLK_FORCE_TDM        (1<<27)
+#       define RADEON_SCLK_FORCE_RB         (1<<28)
+#       define RADEON_SCLK_FORCE_TV_SCLK    (1<<29)
+#       define RADEON_SCLK_FORCE_SUBPIC     (1<<30)
+#       define RADEON_SCLK_FORCE_OV0        (1<<31)
+#       define R300_SCLK_FORCE_VAP          (1<<21)
+#       define R300_SCLK_FORCE_SR           (1<<25)
+#       define R300_SCLK_FORCE_PX           (1<<26)
+#       define R300_SCLK_FORCE_TX           (1<<27)
+#       define R300_SCLK_FORCE_US           (1<<28)
+#       define R300_SCLK_FORCE_SU           (1<<30)
+#define R300_SCLK_CNTL2                     0x1e   /* PLL */
+#       define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+#       define R300_SCLK_GA_MAX_DYN_STOP_LAT  (1<<11)
+#       define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+#       define R300_SCLK_FORCE_TCL          (1<<13)
+#       define R300_SCLK_FORCE_CBA          (1<<14)
+#       define R300_SCLK_FORCE_GA           (1<<15)
+#define RADEON_SCLK_MORE_CNTL               0x0035 /* PLL */
+#       define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+#       define RADEON_SCLK_MORE_FORCEON     0x0700
+#define RADEON_SDRAM_MODE_REG               0x0158
+#define RADEON_SEQ8_DATA                    0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX                     0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT             0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS           0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT           0x024c
+#define RADEON_SRC_OFFSET                   0x15ac
+#define RADEON_SRC_PITCH                    0x15b0
+#define RADEON_SRC_PITCH_OFFSET             0x1428
+#define RADEON_SRC_SC_BOTTOM                0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT          0x16f4
+#define RADEON_SRC_SC_RIGHT                 0x1654
+#define RADEON_SRC_X                        0x1414
+#define RADEON_SRC_X_Y                      0x1590
+#define RADEON_SRC_Y                        0x1418
+#define RADEON_SRC_Y_X                      0x1434
+#define RADEON_STATUS                       0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL                  0x0540 /* ? */
+#define RADEON_SUB_CLASS                    0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL                 0x0b00
+#       define RADEON_SURF_TRANSLATION_DIS  (1 << 8)
+#       define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+#       define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+#       define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+#       define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO                0x0b0c
+#       define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+#       define RADEON_SURF_TILE_COLOR_BOTH  (1 << 16)
+#       define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+#       define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+#       define R200_SURF_TILE_NONE          (0 << 16)
+#       define R200_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R200_SURF_TILE_COLOR_MICRO   (2 << 16)
+#       define R200_SURF_TILE_COLOR_BOTH    (3 << 16)
+#       define R200_SURF_TILE_DEPTH_32BPP   (4 << 16)
+#       define R200_SURF_TILE_DEPTH_16BPP   (5 << 16)
+#       define R300_SURF_TILE_NONE          (0 << 16)
+#       define R300_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R300_SURF_TILE_DEPTH_32BPP   (2 << 16)
+#       define RADEON_SURF_AP0_SWP_16BPP    (1 << 20)
+#       define RADEON_SURF_AP0_SWP_32BPP    (1 << 21)
+#       define RADEON_SURF_AP1_SWP_16BPP    (1 << 22)
+#       define RADEON_SURF_AP1_SWP_32BPP    (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND         0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND         0x0b08
+#define RADEON_SURFACE1_INFO                0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND         0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND         0x0b18
+#define RADEON_SURFACE2_INFO                0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND         0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND         0x0b28
+#define RADEON_SURFACE3_INFO                0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND         0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND         0x0b38
+#define RADEON_SURFACE4_INFO                0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND         0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND         0x0b48
+#define RADEON_SURFACE5_INFO                0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND         0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND         0x0b58
+#define RADEON_SURFACE6_INFO                0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND         0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND         0x0b68
+#define RADEON_SURFACE7_INFO                0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND         0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND         0x0b78
+#define RADEON_SW_SEMAPHORE                 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL              0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX               0x0124
+#define RADEON_TEST_DEBUG_OUT               0x012c
+#define RADEON_TMDS_PLL_CNTL                0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL        0x02a4
+#       define RADEON_TMDS_TRANSMITTER_PLLEN  1
+#       define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC               0x1614
+#define RADEON_TRAIL_BRES_ERR               0x160c
+#define RADEON_TRAIL_BRES_INC               0x1610
+#define RADEON_TRAIL_X                      0x1618
+#define RADEON_TRAIL_X_SUB                  0x1620
+
+#define RADEON_VCLK_ECP_CNTL                0x0008 /* PLL */
+#       define RADEON_VCLK_SRC_SEL_MASK     0x03
+#       define RADEON_VCLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_VCLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_VCLK_SRC_SEL_PPLLCLK  0x03
+#       define RADEON_PIXCLK_ALWAYS_ONb     (1<<6)
+#       define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+#       define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID                    0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG               0x02e8
+#define RADEON_VGA_DDA_ON_OFF               0x02ec
+#define RADEON_VID_BUFFER_CONTROL           0x0900
+#define RADEON_VIDEOMUX_CNTL                0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA                0x0c00
+#define RADEON_VIPH_CH1_DATA                0x0c04
+#define RADEON_VIPH_CH2_DATA                0x0c08
+#define RADEON_VIPH_CH3_DATA                0x0c0c
+#define RADEON_VIPH_CH0_ADDR                0x0c10
+#define RADEON_VIPH_CH1_ADDR                0x0c14
+#define RADEON_VIPH_CH2_ADDR                0x0c18
+#define RADEON_VIPH_CH3_ADDR                0x0c1c
+#define RADEON_VIPH_CH0_SBCNT               0x0c20
+#define RADEON_VIPH_CH1_SBCNT               0x0c24
+#define RADEON_VIPH_CH2_SBCNT               0x0c28
+#define RADEON_VIPH_CH3_SBCNT               0x0c2c
+#define RADEON_VIPH_CH0_ABCNT               0x0c30
+#define RADEON_VIPH_CH1_ABCNT               0x0c34
+#define RADEON_VIPH_CH2_ABCNT               0x0c38
+#define RADEON_VIPH_CH3_ABCNT               0x0c3c
+#define RADEON_VIPH_CONTROL                 0x0c40
+#       define RADEON_VIP_BUSY 0
+#       define RADEON_VIP_IDLE 1
+#       define RADEON_VIP_RESET 2
+#       define RADEON_VIPH_EN               (1 << 21)
+#define RADEON_VIPH_DV_LAT                  0x0c44
+#define RADEON_VIPH_BM_CHUNK                0x0c48
+#define RADEON_VIPH_DV_INT                  0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT            0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK   0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA                0x0084
+#define RADEON_VIPH_REG_ADDR                0x0080
+
+#define RADEON_WAIT_UNTIL                   0x1720
+#       define RADEON_WAIT_CRTC_PFLIP       (1 << 0)
+#       define RADEON_WAIT_RE_CRTC_VLINE    (1 << 1)
+#       define RADEON_WAIT_FE_CRTC_VLINE    (1 << 2)
+#       define RADEON_WAIT_CRTC_VLINE       (1 << 3)
+#       define RADEON_WAIT_DMA_VID_IDLE     (1 << 8)
+#       define RADEON_WAIT_DMA_GUI_IDLE     (1 << 9)
+#       define RADEON_WAIT_CMDFIFO          (1 << 10) /* wait for CMDFIFO_ENTRIES */
+#       define RADEON_WAIT_OV0_FLIP         (1 << 11)
+#       define RADEON_WAIT_AGP_FLUSH        (1 << 13)
+#       define RADEON_WAIT_2D_IDLE          (1 << 14)
+#       define RADEON_WAIT_3D_IDLE          (1 << 15)
+#       define RADEON_WAIT_2D_IDLECLEAN     (1 << 16)
+#       define RADEON_WAIT_3D_IDLECLEAN     (1 << 17)
+#       define RADEON_WAIT_HOST_IDLECLEAN   (1 << 18)
+#       define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+#       define RADEON_CMDFIFO_ENTRIES_MASK  0x7f
+#       define RADEON_WAIT_VAP_IDLE         (1 << 28)
+#       define RADEON_WAIT_BOTH_CRTC_PFLIP  (1 << 30)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC0    (0 << 31)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC1    (1 << 31)
+
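+/*
+ * Illustrative sketch, not part of the original header: a typical
+ * "engine fully idle" value for RADEON_WAIT_UNTIL composed from the
+ * flags above.  The macro name is an assumption.
+ */
+#define RADEON_WAIT_UNTIL_IDLE_SKETCH                           \
+        (RADEON_WAIT_2D_IDLECLEAN |                             \
+         RADEON_WAIT_3D_IDLECLEAN |                             \
+         RADEON_WAIT_HOST_IDLECLEAN)
+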
+#define RADEON_X_MPLL_REF_FB_DIV            0x000a /* PLL */
+#define RADEON_XCLK_CNTL                    0x000d /* PLL */
+#define RADEON_XDLL_CNTL                    0x000c /* PLL */
+#define RADEON_XPLL_CNTL                    0x000b /* PLL */
+
+/* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0            0x1d40
+#define RADEON_PP_BORDER_COLOR_1            0x1d44
+#define RADEON_PP_BORDER_COLOR_2            0x1d48
+#define RADEON_PP_CNTL                      0x1c38
+#       define RADEON_STIPPLE_ENABLE        (1 <<  0)
+#       define RADEON_SCISSOR_ENABLE        (1 <<  1)
+#       define RADEON_PATTERN_ENABLE        (1 <<  2)
+#       define RADEON_SHADOW_ENABLE         (1 <<  3)
+#       define RADEON_TEX_ENABLE_MASK       (0xf << 4)
+#       define RADEON_TEX_0_ENABLE          (1 <<  4)
+#       define RADEON_TEX_1_ENABLE          (1 <<  5)
+#       define RADEON_TEX_2_ENABLE          (1 <<  6)
+#       define RADEON_TEX_3_ENABLE          (1 <<  7)
+#       define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+#       define RADEON_TEX_BLEND_0_ENABLE    (1 << 12)
+#       define RADEON_TEX_BLEND_1_ENABLE    (1 << 13)
+#       define RADEON_TEX_BLEND_2_ENABLE    (1 << 14)
+#       define RADEON_TEX_BLEND_3_ENABLE    (1 << 15)
+#       define RADEON_PLANAR_YUV_ENABLE     (1 << 20)
+#       define RADEON_SPECULAR_ENABLE       (1 << 21)
+#       define RADEON_FOG_ENABLE            (1 << 22)
+#       define RADEON_ALPHA_TEST_ENABLE     (1 << 23)
+#       define RADEON_ANTI_ALIAS_NONE       (0 << 24)
+#       define RADEON_ANTI_ALIAS_LINE       (1 << 24)
+#       define RADEON_ANTI_ALIAS_POLY       (2 << 24)
+#       define RADEON_ANTI_ALIAS_LINE_POLY  (3 << 24)
+#       define RADEON_BUMP_MAP_ENABLE       (1 << 26)
+#       define RADEON_BUMPED_MAP_T0         (0 << 27)
+#       define RADEON_BUMPED_MAP_T1         (1 << 27)
+#       define RADEON_BUMPED_MAP_T2         (2 << 27)
+#       define RADEON_TEX_3D_ENABLE_0       (1 << 29)
+#       define RADEON_TEX_3D_ENABLE_1       (1 << 30)
+#       define RADEON_MC_ENABLE             (1 << 31)
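+
+/*
+ * Illustrative sketch, not part of the original header: enabling the
+ * first `units` texture stages (the fetch bits in [7:4] and blend bits
+ * in [15:12] move in lockstep).  The helper name is hypothetical;
+ * units must be 0..4.
+ */
+static inline unsigned int radeon_pp_cntl_tex_enable_sketch(unsigned int units)
+{
+        unsigned int n = (1 << units) - 1;
+
+        return ((n << 4) & RADEON_TEX_ENABLE_MASK) |
+               ((n << 12) & RADEON_TEX_BLEND_ENABLE_MASK);
+}
+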
+#define RADEON_PP_FOG_COLOR                 0x1c18
+#       define RADEON_FOG_COLOR_MASK        0x00ffffff
+#       define RADEON_FOG_VERTEX            (0 << 24)
+#       define RADEON_FOG_TABLE             (1 << 24)
+#       define RADEON_FOG_USE_DEPTH         (0 << 25)
+#       define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+#       define RADEON_FOG_USE_SPEC_ALPHA    (3 << 25)
+#define RADEON_PP_LUM_MATRIX                0x1d00
+#define RADEON_PP_MISC                      0x1c14
+#       define RADEON_REF_ALPHA_MASK        0x000000ff
+#       define RADEON_ALPHA_TEST_FAIL       (0 << 8)
+#       define RADEON_ALPHA_TEST_LESS       (1 << 8)
+#       define RADEON_ALPHA_TEST_LEQUAL     (2 << 8)
+#       define RADEON_ALPHA_TEST_EQUAL      (3 << 8)
+#       define RADEON_ALPHA_TEST_GEQUAL     (4 << 8)
+#       define RADEON_ALPHA_TEST_GREATER    (5 << 8)
+#       define RADEON_ALPHA_TEST_NEQUAL     (6 << 8)
+#       define RADEON_ALPHA_TEST_PASS       (7 << 8)
+#       define RADEON_ALPHA_TEST_OP_MASK    (7 << 8)
+#       define RADEON_CHROMA_FUNC_FAIL      (0 << 16)
+#       define RADEON_CHROMA_FUNC_PASS      (1 << 16)
+#       define RADEON_CHROMA_FUNC_NEQUAL    (2 << 16)
+#       define RADEON_CHROMA_FUNC_EQUAL     (3 << 16)
+#       define RADEON_CHROMA_KEY_NEAREST    (0 << 18)
+#       define RADEON_CHROMA_KEY_ZERO       (1 << 18)
+#       define RADEON_SHADOW_ID_AUTO_INC    (1 << 20)
+#       define RADEON_SHADOW_FUNC_EQUAL     (0 << 21)
+#       define RADEON_SHADOW_FUNC_NEQUAL    (1 << 21)
+#       define RADEON_SHADOW_PASS_1         (0 << 22)
+#       define RADEON_SHADOW_PASS_2         (1 << 22)
+#       define RADEON_RIGHT_HAND_CUBE_D3D   (0 << 24)
+#       define RADEON_RIGHT_HAND_CUBE_OGL   (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0              0x1d58
+#define RADEON_PP_ROT_MATRIX_1              0x1d5c
+#define RADEON_PP_TXFILTER_0                0x1c54
+#define RADEON_PP_TXFILTER_1                0x1c6c
+#define RADEON_PP_TXFILTER_2                0x1c84
+#       define RADEON_MAG_FILTER_NEAREST                   (0  <<  0)
+#       define RADEON_MAG_FILTER_LINEAR                    (1  <<  0)
+#       define RADEON_MAG_FILTER_MASK                      (1  <<  0)
+#       define RADEON_MIN_FILTER_NEAREST                   (0  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR                    (1  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST       (2  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR        (3  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR         (7  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST             (8  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_LINEAR              (9  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (11 <<  1)
+#       define RADEON_MIN_FILTER_MASK                      (15 <<  1)
+#       define RADEON_MAX_ANISO_1_TO_1                     (0  <<  5)
+#       define RADEON_MAX_ANISO_2_TO_1                     (1  <<  5)
+#       define RADEON_MAX_ANISO_4_TO_1                     (2  <<  5)
+#       define RADEON_MAX_ANISO_8_TO_1                     (3  <<  5)
+#       define RADEON_MAX_ANISO_16_TO_1                    (4  <<  5)
+#       define RADEON_MAX_ANISO_MASK                       (7  <<  5)
+#       define RADEON_LOD_BIAS_MASK                        (0xff <<  8)
+#       define RADEON_LOD_BIAS_SHIFT                       8
+#       define RADEON_MAX_MIP_LEVEL_MASK                   (0x0f << 16)
+#       define RADEON_MAX_MIP_LEVEL_SHIFT                  16
+#       define RADEON_YUV_TO_RGB                           (1  << 20)
+#       define RADEON_YUV_TEMPERATURE_COOL                 (0  << 21)
+#       define RADEON_YUV_TEMPERATURE_HOT                  (1  << 21)
+#       define RADEON_YUV_TEMPERATURE_MASK                 (1  << 21)
+#       define RADEON_WRAPEN_S                             (1  << 22)
+#       define RADEON_CLAMP_S_WRAP                         (0  << 23)
+#       define RADEON_CLAMP_S_MIRROR                       (1  << 23)
+#       define RADEON_CLAMP_S_CLAMP_LAST                   (2  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_LAST            (3  << 23)
+#       define RADEON_CLAMP_S_CLAMP_BORDER                 (4  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER          (5  << 23)
+#       define RADEON_CLAMP_S_CLAMP_GL                     (6  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_GL              (7  << 23)
+#       define RADEON_CLAMP_S_MASK                         (7  << 23)
+#       define RADEON_WRAPEN_T                             (1  << 26)
+#       define RADEON_CLAMP_T_WRAP                         (0  << 27)
+#       define RADEON_CLAMP_T_MIRROR                       (1  << 27)
+#       define RADEON_CLAMP_T_CLAMP_LAST                   (2  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_LAST            (3  << 27)
+#       define RADEON_CLAMP_T_CLAMP_BORDER                 (4  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER          (5  << 27)
+#       define RADEON_CLAMP_T_CLAMP_GL                     (6  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_GL              (7  << 27)
+#       define RADEON_CLAMP_T_MASK                         (7  << 27)
+#       define RADEON_BORDER_MODE_OGL                      (0  << 31)
+#       define RADEON_BORDER_MODE_D3D                      (1  << 31)
+#define RADEON_PP_TXFORMAT_0                0x1c58
+#define RADEON_PP_TXFORMAT_1                0x1c70
+#define RADEON_PP_TXFORMAT_2                0x1c88
+#       define RADEON_TXFORMAT_I8                 (0  <<  0)
+#       define RADEON_TXFORMAT_AI88               (1  <<  0)
+#       define RADEON_TXFORMAT_RGB332             (2  <<  0)
+#       define RADEON_TXFORMAT_ARGB1555           (3  <<  0)
+#       define RADEON_TXFORMAT_RGB565             (4  <<  0)
+#       define RADEON_TXFORMAT_ARGB4444           (5  <<  0)
+#       define RADEON_TXFORMAT_ARGB8888           (6  <<  0)
+#       define RADEON_TXFORMAT_RGBA8888           (7  <<  0)
+#       define RADEON_TXFORMAT_Y8                 (8  <<  0)
+#       define RADEON_TXFORMAT_VYUY422            (10 <<  0)
+#       define RADEON_TXFORMAT_YVYU422            (11 <<  0)
+#       define RADEON_TXFORMAT_DXT1               (12 <<  0)
+#       define RADEON_TXFORMAT_DXT23              (14 <<  0)
+#       define RADEON_TXFORMAT_DXT45              (15 <<  0)
+#       define RADEON_TXFORMAT_SHADOW16           (16 <<  0)
+#       define RADEON_TXFORMAT_SHADOW32           (17 <<  0)
+#       define RADEON_TXFORMAT_DUDV88             (18 <<  0)
+#       define RADEON_TXFORMAT_LDUDV655           (19 <<  0)
+#       define RADEON_TXFORMAT_LDUDUV8888         (20 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_MASK        (31 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_SHIFT       0
+#       define RADEON_TXFORMAT_APPLE_YUV_MODE     (1  <<  5)
+#       define RADEON_TXFORMAT_ALPHA_IN_MAP       (1  <<  6)
+#       define RADEON_TXFORMAT_NON_POWER2         (1  <<  7)
+#       define RADEON_TXFORMAT_WIDTH_MASK         (15 <<  8)
+#       define RADEON_TXFORMAT_WIDTH_SHIFT        8
+#       define RADEON_TXFORMAT_HEIGHT_MASK        (15 << 12)
+#       define RADEON_TXFORMAT_HEIGHT_SHIFT       12
+#       define RADEON_TXFORMAT_F5_WIDTH_MASK      (15 << 16)
+#       define RADEON_TXFORMAT_F5_WIDTH_SHIFT     16
+#       define RADEON_TXFORMAT_F5_HEIGHT_MASK     (15 << 20)
+#       define RADEON_TXFORMAT_F5_HEIGHT_SHIFT    20
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ0      (0  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_MASK      (3  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ1      (1  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ2      (2  << 24)
+#       define RADEON_TXFORMAT_ENDIAN_NO_SWAP     (0  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP  (1  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP  (2  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3  << 26)
+#       define RADEON_TXFORMAT_ALPHA_MASK_ENABLE  (1  << 28)
+#       define RADEON_TXFORMAT_CHROMA_KEY_ENABLE  (1  << 29)
+#       define RADEON_TXFORMAT_CUBIC_MAP_ENABLE   (1  << 30)
+#       define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1  << 31)
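+
+/*
+ * Illustrative sketch, not part of the original header: the WIDTH and
+ * HEIGHT fields hold log2 of the texel dimensions; this packs them
+ * under that assumption.  The helper name is hypothetical.
+ */
+static inline unsigned int radeon_txformat_size_sketch(unsigned int log2_w,
+                                                       unsigned int log2_h)
+{
+        return ((log2_w << RADEON_TXFORMAT_WIDTH_SHIFT) &
+                RADEON_TXFORMAT_WIDTH_MASK) |
+               ((log2_h << RADEON_TXFORMAT_HEIGHT_SHIFT) &
+                RADEON_TXFORMAT_HEIGHT_MASK);
+}
+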
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#       define RADEON_FACE_WIDTH_1_SHIFT          0
+#       define RADEON_FACE_HEIGHT_1_SHIFT         4
+#       define RADEON_FACE_WIDTH_1_MASK           (0xf << 0)
+#       define RADEON_FACE_HEIGHT_1_MASK          (0xf << 4)
+#       define RADEON_FACE_WIDTH_2_SHIFT          8
+#       define RADEON_FACE_HEIGHT_2_SHIFT         12
+#       define RADEON_FACE_WIDTH_2_MASK           (0xf << 8)
+#       define RADEON_FACE_HEIGHT_2_MASK          (0xf << 12)
+#       define RADEON_FACE_WIDTH_3_SHIFT          16
+#       define RADEON_FACE_HEIGHT_3_SHIFT         20
+#       define RADEON_FACE_WIDTH_3_MASK           (0xf << 16)
+#       define RADEON_FACE_HEIGHT_3_MASK          (0xf << 20)
+#       define RADEON_FACE_WIDTH_4_SHIFT          24
+#       define RADEON_FACE_HEIGHT_4_SHIFT         28
+#       define RADEON_FACE_WIDTH_4_MASK           (0xf << 24)
+#       define RADEON_FACE_HEIGHT_4_MASK          (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0                0x1c5c
+#define RADEON_PP_TXOFFSET_1                0x1c74
+#define RADEON_PP_TXOFFSET_2                0x1c8c
+#       define RADEON_TXO_ENDIAN_NO_SWAP     (0 << 0)
+#       define RADEON_TXO_ENDIAN_BYTE_SWAP   (1 << 0)
+#       define RADEON_TXO_ENDIAN_WORD_SWAP   (2 << 0)
+#       define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+#       define RADEON_TXO_MACRO_LINEAR       (0 << 2)
+#       define RADEON_TXO_MACRO_TILE         (1 << 2)
+#       define RADEON_TXO_MICRO_LINEAR       (0 << 3)
+#       define RADEON_TXO_MICRO_TILE_X2      (1 << 3)
+#       define RADEON_TXO_MICRO_TILE_OPT     (2 << 3)
+#       define RADEON_TXO_OFFSET_MASK        0xffffffe0
+#       define RADEON_TXO_OFFSET_SHIFT       5
+
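+/*
+ * Illustrative sketch, not part of the original header: a linear
+ * (untiled) texture offset.  The GPU address must be 32-byte aligned
+ * since the low bits carry endian/tiling flags; the helper name is
+ * hypothetical.
+ */
+static inline unsigned int radeon_pp_txoffset_sketch(unsigned int gpu_addr)
+{
+        return (gpu_addr & RADEON_TXO_OFFSET_MASK) |
+               RADEON_TXO_MACRO_LINEAR | RADEON_TXO_MICRO_LINEAR;
+}
+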
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0  /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1         0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2         0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3         0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4         0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1         0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2         0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3         0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4         0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1         0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2         0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3         0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4         0x1e24
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04  /* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+#       define RADEON_TEX_USIZE_MASK        (0x7ff << 0)
+#       define RADEON_TEX_USIZE_SHIFT       0
+#       define RADEON_TEX_VSIZE_MASK        (0x7ff << 16)
+#       define RADEON_TEX_VSIZE_SHIFT       16
+#       define RADEON_SIGNED_RGB_MASK       (1 << 30)
+#       define RADEON_SIGNED_RGB_SHIFT      30
+#       define RADEON_SIGNED_ALPHA_MASK     (1 << 31)
+#       define RADEON_SIGNED_ALPHA_SHIFT    31
+#define RADEON_PP_TEX_PITCH_0               0x1d08  /* NPOT */
+#define RADEON_PP_TEX_PITCH_1               0x1d10  /* NPOT */
+#define RADEON_PP_TEX_PITCH_2               0x1d18  /* NPOT */
+/* note: bits [13:5] hold the 32-byte-aligned stride of the texture map */
+
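+/*
+ * Illustrative sketch, not part of the original header: packing an NPOT
+ * texture size in texels.  The helper name is hypothetical.
+ */
+static inline unsigned int radeon_pp_tex_size_sketch(unsigned int w,
+                                                     unsigned int h)
+{
+        /* width in bits [10:0], height in bits [26:16] */
+        return ((w << RADEON_TEX_USIZE_SHIFT) & RADEON_TEX_USIZE_MASK) |
+               ((h << RADEON_TEX_VSIZE_SHIFT) & RADEON_TEX_VSIZE_MASK);
+}
+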
+#define RADEON_PP_TXCBLEND_0                0x1c60
+#define RADEON_PP_TXCBLEND_1                0x1c78
+#define RADEON_PP_TXCBLEND_2                0x1c90
+#       define RADEON_COLOR_ARG_A_SHIFT          0
+#       define RADEON_COLOR_ARG_A_MASK           (0x1f << 0)
+#       define RADEON_COLOR_ARG_A_ZERO           (0    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_COLOR  (2    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_ALPHA  (3    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_COLOR  (4    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA  (5    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_COLOR  (8    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_ALPHA  (9    << 0)
+#       define RADEON_COLOR_ARG_A_T0_COLOR       (10   << 0)
+#       define RADEON_COLOR_ARG_A_T0_ALPHA       (11   << 0)
+#       define RADEON_COLOR_ARG_A_T1_COLOR       (12   << 0)
+#       define RADEON_COLOR_ARG_A_T1_ALPHA       (13   << 0)
+#       define RADEON_COLOR_ARG_A_T2_COLOR       (14   << 0)
+#       define RADEON_COLOR_ARG_A_T2_ALPHA       (15   << 0)
+#       define RADEON_COLOR_ARG_A_T3_COLOR       (16   << 0)
+#       define RADEON_COLOR_ARG_A_T3_ALPHA       (17   << 0)
+#       define RADEON_COLOR_ARG_B_SHIFT          5
+#       define RADEON_COLOR_ARG_B_MASK           (0x1f << 5)
+#       define RADEON_COLOR_ARG_B_ZERO           (0    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_COLOR  (2    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_ALPHA  (3    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_COLOR  (4    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA  (5    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_COLOR  (8    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_ALPHA  (9    << 5)
+#       define RADEON_COLOR_ARG_B_T0_COLOR       (10   << 5)
+#       define RADEON_COLOR_ARG_B_T0_ALPHA       (11   << 5)
+#       define RADEON_COLOR_ARG_B_T1_COLOR       (12   << 5)
+#       define RADEON_COLOR_ARG_B_T1_ALPHA       (13   << 5)
+#       define RADEON_COLOR_ARG_B_T2_COLOR       (14   << 5)
+#       define RADEON_COLOR_ARG_B_T2_ALPHA       (15   << 5)
+#       define RADEON_COLOR_ARG_B_T3_COLOR       (16   << 5)
+#       define RADEON_COLOR_ARG_B_T3_ALPHA       (17   << 5)
+#       define RADEON_COLOR_ARG_C_SHIFT          10
+#       define RADEON_COLOR_ARG_C_MASK           (0x1f << 10)
+#       define RADEON_COLOR_ARG_C_ZERO           (0    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_COLOR  (2    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_ALPHA  (3    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_COLOR  (4    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA  (5    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_COLOR  (8    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_ALPHA  (9    << 10)
+#       define RADEON_COLOR_ARG_C_T0_COLOR       (10   << 10)
+#       define RADEON_COLOR_ARG_C_T0_ALPHA       (11   << 10)
+#       define RADEON_COLOR_ARG_C_T1_COLOR       (12   << 10)
+#       define RADEON_COLOR_ARG_C_T1_ALPHA       (13   << 10)
+#       define RADEON_COLOR_ARG_C_T2_COLOR       (14   << 10)
+#       define RADEON_COLOR_ARG_C_T2_ALPHA       (15   << 10)
+#       define RADEON_COLOR_ARG_C_T3_COLOR       (16   << 10)
+#       define RADEON_COLOR_ARG_C_T3_ALPHA       (17   << 10)
+#       define RADEON_COMP_ARG_A                 (1 << 15)
+#       define RADEON_COMP_ARG_A_SHIFT           15
+#       define RADEON_COMP_ARG_B                 (1 << 16)
+#       define RADEON_COMP_ARG_B_SHIFT           16
+#       define RADEON_COMP_ARG_C                 (1 << 17)
+#       define RADEON_COMP_ARG_C_SHIFT           17
+#       define RADEON_BLEND_CTL_MASK             (7 << 18)
+#       define RADEON_BLEND_CTL_ADD              (0 << 18)
+#       define RADEON_BLEND_CTL_SUBTRACT         (1 << 18)
+#       define RADEON_BLEND_CTL_ADDSIGNED        (2 << 18)
+#       define RADEON_BLEND_CTL_BLEND            (3 << 18)
+#       define RADEON_BLEND_CTL_DOT3             (4 << 18)
+#       define RADEON_SCALE_SHIFT                21
+#       define RADEON_SCALE_MASK                 (3 << 21)
+#       define RADEON_SCALE_1X                   (0 << 21)
+#       define RADEON_SCALE_2X                   (1 << 21)
+#       define RADEON_SCALE_4X                   (2 << 21)
+#       define RADEON_CLAMP_TX                   (1 << 23)
+#       define RADEON_T0_EQ_TCUR                 (1 << 24)
+#       define RADEON_T1_EQ_TCUR                 (1 << 25)
+#       define RADEON_T2_EQ_TCUR                 (1 << 26)
+#       define RADEON_T3_EQ_TCUR                 (1 << 27)
+#       define RADEON_COLOR_ARG_MASK             0x1f
+#       define RADEON_COMP_ARG_SHIFT             15
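+
+/*
+ * Illustrative sketch, not part of the original header: a GL_MODULATE-style
+ * color combine, assuming the combiner evaluates (ARG_A * ARG_B) + ARG_C
+ * when RADEON_BLEND_CTL_ADD is selected.  The macro name is hypothetical.
+ */
+#define RADEON_TXCBLEND_MODULATE_SKETCH                         \
+        (RADEON_COLOR_ARG_A_T0_COLOR |                          \
+         RADEON_COLOR_ARG_B_DIFFUSE_COLOR |                     \
+         RADEON_COLOR_ARG_C_ZERO |                              \
+         RADEON_BLEND_CTL_ADD)
+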
+#define RADEON_PP_TXABLEND_0                0x1c64
+#define RADEON_PP_TXABLEND_1                0x1c7c
+#define RADEON_PP_TXABLEND_2                0x1c94
+#       define RADEON_ALPHA_ARG_A_SHIFT          0
+#       define RADEON_ALPHA_ARG_A_MASK           (0xf << 0)
+#       define RADEON_ALPHA_ARG_A_ZERO           (0   << 0)
+#       define RADEON_ALPHA_ARG_A_CURRENT_ALPHA  (1   << 0)
+#       define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA  (2   << 0)
+#       define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3   << 0)
+#       define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA  (4   << 0)
+#       define RADEON_ALPHA_ARG_A_T0_ALPHA       (5   << 0)
+#       define RADEON_ALPHA_ARG_A_T1_ALPHA       (6   << 0)
+#       define RADEON_ALPHA_ARG_A_T2_ALPHA       (7   << 0)
+#       define RADEON_ALPHA_ARG_A_T3_ALPHA       (8   << 0)
+#       define RADEON_ALPHA_ARG_B_SHIFT          4
+#       define RADEON_ALPHA_ARG_B_MASK           (0xf << 4)
+#       define RADEON_ALPHA_ARG_B_ZERO           (0   << 4)
+#       define RADEON_ALPHA_ARG_B_CURRENT_ALPHA  (1   << 4)
+#       define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA  (2   << 4)
+#       define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3   << 4)
+#       define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA  (4   << 4)
+#       define RADEON_ALPHA_ARG_B_T0_ALPHA       (5   << 4)
+#       define RADEON_ALPHA_ARG_B_T1_ALPHA       (6   << 4)
+#       define RADEON_ALPHA_ARG_B_T2_ALPHA       (7   << 4)
+#       define RADEON_ALPHA_ARG_B_T3_ALPHA       (8   << 4)
+#       define RADEON_ALPHA_ARG_C_SHIFT          8
+#       define RADEON_ALPHA_ARG_C_MASK           (0xf << 8)
+#       define RADEON_ALPHA_ARG_C_ZERO           (0   << 8)
+#       define RADEON_ALPHA_ARG_C_CURRENT_ALPHA  (1   << 8)
+#       define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA  (2   << 8)
+#       define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3   << 8)
+#       define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA  (4   << 8)
+#       define RADEON_ALPHA_ARG_C_T0_ALPHA       (5   << 8)
+#       define RADEON_ALPHA_ARG_C_T1_ALPHA       (6   << 8)
+#       define RADEON_ALPHA_ARG_C_T2_ALPHA       (7   << 8)
+#       define RADEON_ALPHA_ARG_C_T3_ALPHA       (8   << 8)
+#       define RADEON_DOT_ALPHA_DONT_REPLICATE   (1   << 9)
+#       define RADEON_ALPHA_ARG_MASK             0xf
+
+#define RADEON_PP_TFACTOR_0                 0x1c68
+#define RADEON_PP_TFACTOR_1                 0x1c80
+#define RADEON_PP_TFACTOR_2                 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL               0x1c20
+#       define RADEON_COMB_FCN_MASK                    (3  << 12)
+#       define RADEON_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define RADEON_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define RADEON_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define RADEON_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define RADEON_SRC_BLEND_GL_ZERO                (32 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE                 (33 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_COLOR           (36 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
+#       define RADEON_SRC_BLEND_MASK                   (63 << 16)
+#       define RADEON_DST_BLEND_GL_ZERO                (32 << 24)
+#       define RADEON_DST_BLEND_GL_ONE                 (33 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_COLOR           (34 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#       define RADEON_DST_BLEND_GL_DST_COLOR           (36 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#       define RADEON_DST_BLEND_GL_DST_ALPHA           (40 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#       define RADEON_DST_BLEND_MASK                   (63 << 24)
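+
+/*
+ * Illustrative sketch, not part of the original header: classic
+ * source-over blending, i.e. glBlendFunc(GL_SRC_ALPHA,
+ * GL_ONE_MINUS_SRC_ALPHA), expressed with the fields above.  The macro
+ * name is hypothetical.
+ */
+#define RADEON_BLENDCNTL_SRC_OVER_SKETCH                        \
+        (RADEON_COMB_FCN_ADD_CLAMP |                            \
+         RADEON_SRC_BLEND_GL_SRC_ALPHA |                        \
+         RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA)
+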
+#define RADEON_RB3D_CNTL                    0x1c3c
+#       define RADEON_ALPHA_BLEND_ENABLE       (1  <<  0)
+#       define RADEON_PLANE_MASK_ENABLE        (1  <<  1)
+#       define RADEON_DITHER_ENABLE            (1  <<  2)
+#       define RADEON_ROUND_ENABLE             (1  <<  3)
+#       define RADEON_SCALE_DITHER_ENABLE      (1  <<  4)
+#       define RADEON_DITHER_INIT              (1  <<  5)
+#       define RADEON_ROP_ENABLE               (1  <<  6)
+#       define RADEON_STENCIL_ENABLE           (1  <<  7)
+#       define RADEON_Z_ENABLE                 (1  <<  8)
+#       define RADEON_DEPTHXY_OFFSET_ENABLE    (1  <<  9)
+#       define RADEON_RB3D_COLOR_FORMAT_SHIFT  10
+
+#       define RADEON_COLOR_FORMAT_ARGB1555    3
+#       define RADEON_COLOR_FORMAT_RGB565      4
+#       define RADEON_COLOR_FORMAT_ARGB8888    6
+#       define RADEON_COLOR_FORMAT_RGB332      7
+#       define RADEON_COLOR_FORMAT_Y8          8
+#       define RADEON_COLOR_FORMAT_RGB8        9
+#       define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+#       define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+#       define RADEON_COLOR_FORMAT_aYUV444     14
+#       define RADEON_COLOR_FORMAT_ARGB4444    15
+
+#       define RADEON_CLRCMP_FLIP_ENABLE       (1  << 14)
+#define RADEON_RB3D_COLOROFFSET             0x1c40
+#       define RADEON_COLOROFFSET_MASK      0xfffffff0
+#define RADEON_RB3D_COLORPITCH              0x1c48
+#       define RADEON_COLORPITCH_MASK         0x00001ff8
+#       define RADEON_COLOR_TILE_ENABLE       (1 << 16)
+#       define RADEON_COLOR_MICROTILE_ENABLE  (1 << 17)
+#       define RADEON_COLOR_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_COLOR_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET             0x1c24
+#define RADEON_RB3D_DEPTHPITCH              0x1c28
+#       define RADEON_DEPTHPITCH_MASK         0x00001ff8
+#       define RADEON_DEPTH_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_DEPTH_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK               0x1d84
+#define RADEON_RB3D_ROPCNTL                 0x1d80
+#       define RADEON_ROP_MASK              (15 << 8)
+#       define RADEON_ROP_CLEAR             (0  << 8)
+#       define RADEON_ROP_NOR               (1  << 8)
+#       define RADEON_ROP_AND_INVERTED      (2  << 8)
+#       define RADEON_ROP_COPY_INVERTED     (3  << 8)
+#       define RADEON_ROP_AND_REVERSE       (4  << 8)
+#       define RADEON_ROP_INVERT            (5  << 8)
+#       define RADEON_ROP_XOR               (6  << 8)
+#       define RADEON_ROP_NAND              (7  << 8)
+#       define RADEON_ROP_AND               (8  << 8)
+#       define RADEON_ROP_EQUIV             (9  << 8)
+#       define RADEON_ROP_NOOP              (10 << 8)
+#       define RADEON_ROP_OR_INVERTED       (11 << 8)
+#       define RADEON_ROP_COPY              (12 << 8)
+#       define RADEON_ROP_OR_REVERSE        (13 << 8)
+#       define RADEON_ROP_OR                (14 << 8)
+#       define RADEON_ROP_SET               (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK          0x1d7c
+#       define RADEON_STENCIL_REF_SHIFT       0
+#       define RADEON_STENCIL_REF_MASK        (0xff << 0)
+#       define RADEON_STENCIL_MASK_SHIFT      16
+#       define RADEON_STENCIL_VALUE_MASK      (0xff << 16)
+#       define RADEON_STENCIL_WRITEMASK_SHIFT 24
+#       define RADEON_STENCIL_WRITE_MASK      (0xff << 24)
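+
+/*
+ * Illustrative sketch, not part of the original header: packing stencil
+ * reference, compare mask and write mask.  The helper name is
+ * hypothetical.
+ */
+static inline unsigned int radeon_stencilrefmask_sketch(unsigned int ref,
+                                                        unsigned int mask,
+                                                        unsigned int wrmask)
+{
+        return ((ref << RADEON_STENCIL_REF_SHIFT) & RADEON_STENCIL_REF_MASK) |
+               ((mask << RADEON_STENCIL_MASK_SHIFT) &
+                RADEON_STENCIL_VALUE_MASK) |
+               ((wrmask << RADEON_STENCIL_WRITEMASK_SHIFT) &
+                RADEON_STENCIL_WRITE_MASK);
+}
+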
+#define RADEON_RB3D_ZSTENCILCNTL            0x1c2c
+#       define RADEON_DEPTH_FORMAT_MASK          (0xf << 0)
+#       define RADEON_DEPTH_FORMAT_16BIT_INT_Z   (0  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_INT_Z   (2  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_INT_Z   (4  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5  <<  0)
+#       define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 <<  0)
+#       define RADEON_Z_TEST_NEVER               (0  <<  4)
+#       define RADEON_Z_TEST_LESS                (1  <<  4)
+#       define RADEON_Z_TEST_LEQUAL              (2  <<  4)
+#       define RADEON_Z_TEST_EQUAL               (3  <<  4)
+#       define RADEON_Z_TEST_GEQUAL              (4  <<  4)
+#       define RADEON_Z_TEST_GREATER             (5  <<  4)
+#       define RADEON_Z_TEST_NEQUAL              (6  <<  4)
+#       define RADEON_Z_TEST_ALWAYS              (7  <<  4)
+#       define RADEON_Z_TEST_MASK                (7  <<  4)
+#       define RADEON_STENCIL_TEST_NEVER         (0  << 12)
+#       define RADEON_STENCIL_TEST_LESS          (1  << 12)
+#       define RADEON_STENCIL_TEST_LEQUAL        (2  << 12)
+#       define RADEON_STENCIL_TEST_EQUAL         (3  << 12)
+#       define RADEON_STENCIL_TEST_GEQUAL        (4  << 12)
+#       define RADEON_STENCIL_TEST_GREATER       (5  << 12)
+#       define RADEON_STENCIL_TEST_NEQUAL        (6  << 12)
+#       define RADEON_STENCIL_TEST_ALWAYS        (7  << 12)
+#       define RADEON_STENCIL_TEST_MASK          (0x7 << 12)
+#       define RADEON_STENCIL_FAIL_KEEP          (0  << 16)
+#       define RADEON_STENCIL_FAIL_ZERO          (1  << 16)
+#       define RADEON_STENCIL_FAIL_REPLACE       (2  << 16)
+#       define RADEON_STENCIL_FAIL_INC           (3  << 16)
+#       define RADEON_STENCIL_FAIL_DEC           (4  << 16)
+#       define RADEON_STENCIL_FAIL_INVERT        (5  << 16)
+#       define RADEON_STENCIL_FAIL_MASK          (0x7 << 16)
+#       define RADEON_STENCIL_ZPASS_KEEP         (0  << 20)
+#       define RADEON_STENCIL_ZPASS_ZERO         (1  << 20)
+#       define RADEON_STENCIL_ZPASS_REPLACE      (2  << 20)
+#       define RADEON_STENCIL_ZPASS_INC          (3  << 20)
+#       define RADEON_STENCIL_ZPASS_DEC          (4  << 20)
+#       define RADEON_STENCIL_ZPASS_INVERT       (5  << 20)
+#       define RADEON_STENCIL_ZPASS_MASK         (0x7 << 20)
+#       define RADEON_STENCIL_ZFAIL_KEEP         (0  << 24)
+#       define RADEON_STENCIL_ZFAIL_ZERO         (1  << 24)
+#       define RADEON_STENCIL_ZFAIL_REPLACE      (2  << 24)
+#       define RADEON_STENCIL_ZFAIL_INC          (3  << 24)
+#       define RADEON_STENCIL_ZFAIL_DEC          (4  << 24)
+#       define RADEON_STENCIL_ZFAIL_INVERT       (5  << 24)
+#       define RADEON_STENCIL_ZFAIL_MASK         (0x7 << 24)
+#       define RADEON_Z_COMPRESSION_ENABLE       (1  << 28)
+#       define RADEON_FORCE_Z_DIRTY              (1  << 29)
+#       define RADEON_Z_WRITE_ENABLE             (1  << 30)
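+
+/*
+ * Illustrative sketch, not part of the original header: a common depth
+ * setup (24-bit integer Z, less-than test, Z writes on).  The macro name
+ * is hypothetical.
+ */
+#define RADEON_ZSTENCILCNTL_Z24_LESS_SKETCH                     \
+        (RADEON_DEPTH_FORMAT_24BIT_INT_Z |                      \
+         RADEON_Z_TEST_LESS |                                   \
+         RADEON_Z_WRITE_ENABLE)
+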
+#define RADEON_RE_LINE_PATTERN              0x1cd0
+#       define RADEON_LINE_PATTERN_MASK             0x0000ffff
+#       define RADEON_LINE_REPEAT_COUNT_SHIFT       16
+#       define RADEON_LINE_PATTERN_START_SHIFT      24
+#       define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+#       define RADEON_LINE_PATTERN_BIG_BIT_ORDER    (1 << 28)
+#       define RADEON_LINE_PATTERN_AUTO_RESET       (1 << 29)
+#define RADEON_RE_LINE_STATE                0x1cd4
+#       define RADEON_LINE_CURRENT_PTR_SHIFT   0
+#       define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC                      0x26c4
+#       define RADEON_STIPPLE_COORD_MASK       0x1f
+#       define RADEON_STIPPLE_X_OFFSET_SHIFT   0
+#       define RADEON_STIPPLE_X_OFFSET_MASK    (0x1f << 0)
+#       define RADEON_STIPPLE_Y_OFFSET_SHIFT   8
+#       define RADEON_STIPPLE_Y_OFFSET_MASK    (0x1f << 8)
+#       define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+#       define RADEON_STIPPLE_BIG_BIT_ORDER    (1 << 16)
+#define RADEON_RE_SOLID_COLOR               0x1c1c
+#define RADEON_RE_TOP_LEFT                  0x26c0
+#       define RADEON_RE_LEFT_SHIFT         0
+#       define RADEON_RE_TOP_SHIFT          16
+#define RADEON_RE_WIDTH_HEIGHT              0x1c44
+#       define RADEON_RE_WIDTH_SHIFT        0
+#       define RADEON_RE_HEIGHT_SHIFT       16
+
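+/*
+ * Illustrative sketch, not part of the original header: packing a
+ * width/height pair for RADEON_RE_WIDTH_HEIGHT.  The helper name is
+ * hypothetical.
+ */
+static inline unsigned int radeon_re_width_height_sketch(unsigned int w,
+                                                         unsigned int h)
+{
+        return (w << RADEON_RE_WIDTH_SHIFT) | (h << RADEON_RE_HEIGHT_SHIFT);
+}
+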
+#define RADEON_RB3D_ZPASS_DATA              0x3290
+#define RADEON_RB3D_ZPASS_ADDR              0x3294
+
+#define RADEON_SE_CNTL                      0x1c4c
+#       define RADEON_FFACE_CULL_CW          (0 <<  0)
+#       define RADEON_FFACE_CULL_CCW         (1 <<  0)
+#       define RADEON_FFACE_CULL_DIR_MASK    (1 <<  0)
+#       define RADEON_BFACE_CULL             (0 <<  1)
+#       define RADEON_BFACE_SOLID            (3 <<  1)
+#       define RADEON_FFACE_CULL             (0 <<  3)
+#       define RADEON_FFACE_SOLID            (3 <<  3)
+#       define RADEON_FFACE_CULL_MASK        (3 <<  3)
+#       define RADEON_BADVTX_CULL_DISABLE    (1 <<  5)
+#       define RADEON_FLAT_SHADE_VTX_0       (0 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_1       (1 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_2       (2 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_LAST    (3 <<  6)
+#       define RADEON_DIFFUSE_SHADE_SOLID    (0 <<  8)
+#       define RADEON_DIFFUSE_SHADE_FLAT     (1 <<  8)
+#       define RADEON_DIFFUSE_SHADE_GOURAUD  (2 <<  8)
+#       define RADEON_DIFFUSE_SHADE_MASK     (3 <<  8)
+#       define RADEON_ALPHA_SHADE_SOLID      (0 << 10)
+#       define RADEON_ALPHA_SHADE_FLAT       (1 << 10)
+#       define RADEON_ALPHA_SHADE_GOURAUD    (2 << 10)
+#       define RADEON_ALPHA_SHADE_MASK       (3 << 10)
+#       define RADEON_SPECULAR_SHADE_SOLID   (0 << 12)
+#       define RADEON_SPECULAR_SHADE_FLAT    (1 << 12)
+#       define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+#       define RADEON_SPECULAR_SHADE_MASK    (3 << 12)
+#       define RADEON_FOG_SHADE_SOLID        (0 << 14)
+#       define RADEON_FOG_SHADE_FLAT         (1 << 14)
+#       define RADEON_FOG_SHADE_GOURAUD      (2 << 14)
+#       define RADEON_FOG_SHADE_MASK         (3 << 14)
+#       define RADEON_ZBIAS_ENABLE_POINT     (1 << 16)
+#       define RADEON_ZBIAS_ENABLE_LINE      (1 << 17)
+#       define RADEON_ZBIAS_ENABLE_TRI       (1 << 18)
+#       define RADEON_WIDELINE_ENABLE        (1 << 20)
+#       define RADEON_VPORT_XY_XFORM_ENABLE  (1 << 24)
+#       define RADEON_VPORT_Z_XFORM_ENABLE   (1 << 25)
+#       define RADEON_VTX_PIX_CENTER_D3D     (0 << 27)
+#       define RADEON_VTX_PIX_CENTER_OGL     (1 << 27)
+#       define RADEON_ROUND_MODE_TRUNC       (0 << 28)
+#       define RADEON_ROUND_MODE_ROUND       (1 << 28)
+#       define RADEON_ROUND_MODE_ROUND_EVEN  (2 << 28)
+#       define RADEON_ROUND_MODE_ROUND_ODD   (3 << 28)
+#       define RADEON_ROUND_PREC_16TH_PIX    (0 << 30)
+#       define RADEON_ROUND_PREC_8TH_PIX     (1 << 30)
+#       define RADEON_ROUND_PREC_4TH_PIX     (2 << 30)
+#       define RADEON_ROUND_PREC_HALF_PIX    (3 << 30)
+#define R200_RE_CNTL				0x1c50
+#       define R200_STIPPLE_ENABLE		0x1
+#       define R200_SCISSOR_ENABLE		0x2
+#       define R200_PATTERN_ENABLE		0x4
+#       define R200_PERSPECTIVE_ENABLE		0x8
+#       define R200_POINT_SMOOTH		0x20
+#       define R200_VTX_STQ0_D3D		0x00010000
+#       define R200_VTX_STQ1_D3D		0x00040000
+#       define R200_VTX_STQ2_D3D		0x00100000
+#       define R200_VTX_STQ3_D3D		0x00400000
+#       define R200_VTX_STQ4_D3D		0x01000000
+#       define R200_VTX_STQ5_D3D		0x04000000
+#define RADEON_SE_CNTL_STATUS               0x2140
+#       define RADEON_VC_NO_SWAP            (0 << 0)
+#       define RADEON_VC_16BIT_SWAP         (1 << 0)
+#       define RADEON_VC_32BIT_SWAP         (2 << 0)
+#       define RADEON_VC_HALF_DWORD_SWAP    (3 << 0)
+#       define RADEON_TCL_BYPASS            (1 << 8)
+#define RADEON_SE_COORD_FMT                 0x1c50
+#       define RADEON_VTX_XY_PRE_MULT_1_OVER_W0  (1 <<  0)
+#       define RADEON_VTX_Z_PRE_MULT_1_OVER_W0   (1 <<  1)
+#       define RADEON_VTX_ST0_NONPARAMETRIC      (1 <<  8)
+#       define RADEON_VTX_ST1_NONPARAMETRIC      (1 <<  9)
+#       define RADEON_VTX_ST2_NONPARAMETRIC      (1 << 10)
+#       define RADEON_VTX_ST3_NONPARAMETRIC      (1 << 11)
+#       define RADEON_VTX_W0_NORMALIZE           (1 << 12)
+#       define RADEON_VTX_W0_IS_NOT_1_OVER_W0    (1 << 16)
+#       define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+#       define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+#       define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+#       define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+#       define RADEON_TEX1_W_ROUTING_USE_W0      (0 << 26)
+#       define RADEON_TEX1_W_ROUTING_USE_Q1      (1 << 26)
+#define RADEON_SE_LINE_WIDTH                0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL       0x226c
+#       define RADEON_LIGHTING_ENABLE              (1 << 0)
+#       define RADEON_LIGHT_IN_MODELSPACE          (1 << 1)
+#       define RADEON_LOCAL_VIEWER                 (1 << 2)
+#       define RADEON_NORMALIZE_NORMALS            (1 << 3)
+#       define RADEON_RESCALE_NORMALS              (1 << 4)
+#       define RADEON_SPECULAR_LIGHTS              (1 << 5)
+#       define RADEON_DIFFUSE_SPECULAR_COMBINE     (1 << 6)
+#       define RADEON_LIGHT_ALPHA                  (1 << 7)
+#       define RADEON_LOCAL_LIGHT_VEC_GL           (1 << 8)
+#       define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+#       define RADEON_LM_SOURCE_STATE_PREMULT      0
+#       define RADEON_LM_SOURCE_STATE_MULT         1
+#       define RADEON_LM_SOURCE_VERTEX_DIFFUSE     2
+#       define RADEON_LM_SOURCE_VERTEX_SPECULAR    3
+#       define RADEON_EMISSIVE_SOURCE_SHIFT        16
+#       define RADEON_AMBIENT_SOURCE_SHIFT         18
+#       define RADEON_DIFFUSE_SOURCE_SHIFT         20
+#       define RADEON_SPECULAR_SOURCE_SHIFT        22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED     0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN   0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE    0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA   0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED     0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN   0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE    0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA   0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED   0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE  0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED    0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN  0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE   0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA  0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0       0x225c
+#       define RADEON_MODELVIEW_0_SHIFT        0
+#       define RADEON_MODELVIEW_1_SHIFT        4
+#       define RADEON_MODELVIEW_2_SHIFT        8
+#       define RADEON_MODELVIEW_3_SHIFT        12
+#       define RADEON_IT_MODELVIEW_0_SHIFT     16
+#       define RADEON_IT_MODELVIEW_1_SHIFT     20
+#       define RADEON_IT_MODELVIEW_2_SHIFT     24
+#       define RADEON_IT_MODELVIEW_3_SHIFT     28
+#define RADEON_SE_TCL_MATRIX_SELECT_1       0x2260
+#       define RADEON_MODELPROJECT_0_SHIFT     0
+#       define RADEON_MODELPROJECT_1_SHIFT     4
+#       define RADEON_MODELPROJECT_2_SHIFT     8
+#       define RADEON_MODELPROJECT_3_SHIFT     12
+#       define RADEON_TEXMAT_0_SHIFT           16
+#       define RADEON_TEXMAT_1_SHIFT           20
+#       define RADEON_TEXMAT_2_SHIFT           24
+#       define RADEON_TEXMAT_3_SHIFT           28
+
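+/*
+ * Illustrative sketch, not part of the original header: selecting the
+ * four modelview matrix slots in MATRIX_SELECT_0 (each field is 4 bits
+ * wide).  The helper name is hypothetical.
+ */
+static inline unsigned int radeon_matrix_select_0_sketch(unsigned int m0,
+                                                         unsigned int m1,
+                                                         unsigned int m2,
+                                                         unsigned int m3)
+{
+        return (m0 << RADEON_MODELVIEW_0_SHIFT) |
+               (m1 << RADEON_MODELVIEW_1_SHIFT) |
+               (m2 << RADEON_MODELVIEW_2_SHIFT) |
+               (m3 << RADEON_MODELVIEW_3_SHIFT);
+}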
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT        0x2254
+#       define RADEON_TCL_VTX_W0                 (1 <<  0)
+#       define RADEON_TCL_VTX_FP_DIFFUSE         (1 <<  1)
+#       define RADEON_TCL_VTX_FP_ALPHA           (1 <<  2)
+#       define RADEON_TCL_VTX_PK_DIFFUSE         (1 <<  3)
+#       define RADEON_TCL_VTX_FP_SPEC            (1 <<  4)
+#       define RADEON_TCL_VTX_FP_FOG             (1 <<  5)
+#       define RADEON_TCL_VTX_PK_SPEC            (1 <<  6)
+#       define RADEON_TCL_VTX_ST0                (1 <<  7)
+#       define RADEON_TCL_VTX_ST1                (1 <<  8)
+#       define RADEON_TCL_VTX_Q1                 (1 <<  9)
+#       define RADEON_TCL_VTX_ST2                (1 << 10)
+#       define RADEON_TCL_VTX_Q2                 (1 << 11)
+#       define RADEON_TCL_VTX_ST3                (1 << 12)
+#       define RADEON_TCL_VTX_Q3                 (1 << 13)
+#       define RADEON_TCL_VTX_Q0                 (1 << 14)
+#       define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+#       define RADEON_TCL_VTX_NORM0              (1 << 18)
+#       define RADEON_TCL_VTX_XY1                (1 << 27)
+#       define RADEON_TCL_VTX_Z1                 (1 << 28)
+#       define RADEON_TCL_VTX_W1                 (1 << 29)
+#       define RADEON_TCL_VTX_NORM1              (1 << 30)
+#       define RADEON_TCL_VTX_Z0                 (1 << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL        0x2258
+#       define RADEON_TCL_COMPUTE_XYZW           (1 << 0)
+#       define RADEON_TCL_COMPUTE_DIFFUSE        (1 << 1)
+#       define RADEON_TCL_COMPUTE_SPECULAR       (1 << 2)
+#       define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+#       define RADEON_TCL_FORCE_INORDER_PROC     (1 << 4)
+#       define RADEON_TCL_TEX_INPUT_TEX_0        0
+#       define RADEON_TCL_TEX_INPUT_TEX_1        1
+#       define RADEON_TCL_TEX_INPUT_TEX_2        2
+#       define RADEON_TCL_TEX_INPUT_TEX_3        3
+#       define RADEON_TCL_TEX_COMPUTED_TEX_0     8
+#       define RADEON_TCL_TEX_COMPUTED_TEX_1     9
+#       define RADEON_TCL_TEX_COMPUTED_TEX_2     10
+#       define RADEON_TCL_TEX_COMPUTED_TEX_3     11
+#       define RADEON_TCL_TEX_0_OUTPUT_SHIFT     16
+#       define RADEON_TCL_TEX_1_OUTPUT_SHIFT     20
+#       define RADEON_TCL_TEX_2_OUTPUT_SHIFT     24
+#       define RADEON_TCL_TEX_3_OUTPUT_SHIFT     28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0       0x2270
+#       define RADEON_LIGHT_0_ENABLE               (1 <<  0)
+#       define RADEON_LIGHT_0_ENABLE_AMBIENT       (1 <<  1)
+#       define RADEON_LIGHT_0_ENABLE_SPECULAR      (1 <<  2)
+#       define RADEON_LIGHT_0_IS_LOCAL             (1 <<  3)
+#       define RADEON_LIGHT_0_IS_SPOT              (1 <<  4)
+#       define RADEON_LIGHT_0_DUAL_CONE            (1 <<  5)
+#       define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN   (1 <<  6)
+#       define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 <<  7)
+#       define RADEON_LIGHT_0_SHIFT                0
+#       define RADEON_LIGHT_1_ENABLE               (1 << 16)
+#       define RADEON_LIGHT_1_ENABLE_AMBIENT       (1 << 17)
+#       define RADEON_LIGHT_1_ENABLE_SPECULAR      (1 << 18)
+#       define RADEON_LIGHT_1_IS_LOCAL             (1 << 19)
+#       define RADEON_LIGHT_1_IS_SPOT              (1 << 20)
+#       define RADEON_LIGHT_1_DUAL_CONE            (1 << 21)
+#       define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN   (1 << 22)
+#       define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+#       define RADEON_LIGHT_1_SHIFT                16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1       0x2274
+#       define RADEON_LIGHT_2_SHIFT            0
+#       define RADEON_LIGHT_3_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2       0x2278
+#       define RADEON_LIGHT_4_SHIFT            0
+#       define RADEON_LIGHT_5_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3       0x227c
+#       define RADEON_LIGHT_6_SHIFT            0
+#       define RADEON_LIGHT_7_SHIFT            16
+
+#define RADEON_SE_TCL_SHININESS             0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL      0x2268
+#       define RADEON_TEXGEN_TEXMAT_0_ENABLE      (1 << 0)
+#       define RADEON_TEXGEN_TEXMAT_1_ENABLE      (1 << 1)
+#       define RADEON_TEXGEN_TEXMAT_2_ENABLE      (1 << 2)
+#       define RADEON_TEXGEN_TEXMAT_3_ENABLE      (1 << 3)
+#       define RADEON_TEXMAT_0_ENABLE             (1 << 4)
+#       define RADEON_TEXMAT_1_ENABLE             (1 << 5)
+#       define RADEON_TEXMAT_2_ENABLE             (1 << 6)
+#       define RADEON_TEXMAT_3_ENABLE             (1 << 7)
+#       define RADEON_TEXGEN_INPUT_MASK           0xf
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_0     0
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_1     1
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_2     2
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_3     3
+#       define RADEON_TEXGEN_INPUT_OBJ            4
+#       define RADEON_TEXGEN_INPUT_EYE            5
+#       define RADEON_TEXGEN_INPUT_EYE_NORMAL     6
+#       define RADEON_TEXGEN_INPUT_EYE_REFLECT    7
+#       define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+#       define RADEON_TEXGEN_0_INPUT_SHIFT        16
+#       define RADEON_TEXGEN_1_INPUT_SHIFT        20
+#       define RADEON_TEXGEN_2_INPUT_SHIFT        24
+#       define RADEON_TEXGEN_3_INPUT_SHIFT        28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL    0x2264
+#       define RADEON_UCP_IN_CLIP_SPACE            (1 <<  0)
+#       define RADEON_UCP_IN_MODEL_SPACE           (1 <<  1)
+#       define RADEON_UCP_ENABLE_0                 (1 <<  2)
+#       define RADEON_UCP_ENABLE_1                 (1 <<  3)
+#       define RADEON_UCP_ENABLE_2                 (1 <<  4)
+#       define RADEON_UCP_ENABLE_3                 (1 <<  5)
+#       define RADEON_UCP_ENABLE_4                 (1 <<  6)
+#       define RADEON_UCP_ENABLE_5                 (1 <<  7)
+#       define RADEON_TCL_FOG_MASK                 (3 <<  8)
+#       define RADEON_TCL_FOG_DISABLE              (0 <<  8)
+#       define RADEON_TCL_FOG_EXP                  (1 <<  8)
+#       define RADEON_TCL_FOG_EXP2                 (2 <<  8)
+#       define RADEON_TCL_FOG_LINEAR               (3 <<  8)
+#       define RADEON_RNG_BASED_FOG                (1 << 10)
+#       define RADEON_LIGHT_TWOSIDE                (1 << 11)
+#       define RADEON_BLEND_OP_COUNT_MASK          (7 << 12)
+#       define RADEON_BLEND_OP_COUNT_SHIFT         12
+#       define RADEON_POSITION_BLEND_OP_ENABLE     (1 << 16)
+#       define RADEON_NORMAL_BLEND_OP_ENABLE       (1 << 17)
+#       define RADEON_VERTEX_BLEND_SRC_0_PRIMARY   (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_1_PRIMARY   (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_2_PRIMARY   (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_3_PRIMARY   (1 << 21)
+#       define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+#       define RADEON_VERTEX_BLEND_WGT_MINUS_ONE   (1 << 22)
+#       define RADEON_CULL_FRONT_IS_CW             (0 << 28)
+#       define RADEON_CULL_FRONT_IS_CCW            (1 << 28)
+#       define RADEON_CULL_FRONT                   (1 << 29)
+#       define RADEON_CULL_BACK                    (1 << 30)
+#       define RADEON_FORCE_W_TO_ONE               (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE              0x1d98
+#define RADEON_SE_VPORT_XOFFSET             0x1d9c
+#define RADEON_SE_VPORT_YSCALE              0x1da0
+#define RADEON_SE_VPORT_YOFFSET             0x1da4
+#define RADEON_SE_VPORT_ZSCALE              0x1da8
+#define RADEON_SE_VPORT_ZOFFSET             0x1dac
+#define RADEON_SE_ZBIAS_FACTOR              0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT            0x1db4
+
+#define RADEON_SE_VTX_FMT                   0x2080
+#       define RADEON_SE_VTX_FMT_XY         0x00000000
+#       define RADEON_SE_VTX_FMT_W0         0x00000001
+#       define RADEON_SE_VTX_FMT_FPCOLOR    0x00000002
+#       define RADEON_SE_VTX_FMT_FPALPHA    0x00000004
+#       define RADEON_SE_VTX_FMT_PKCOLOR    0x00000008
+#       define RADEON_SE_VTX_FMT_FPSPEC     0x00000010
+#       define RADEON_SE_VTX_FMT_FPFOG      0x00000020
+#       define RADEON_SE_VTX_FMT_PKSPEC     0x00000040
+#       define RADEON_SE_VTX_FMT_ST0        0x00000080
+#       define RADEON_SE_VTX_FMT_ST1        0x00000100
+#       define RADEON_SE_VTX_FMT_Q1         0x00000200
+#       define RADEON_SE_VTX_FMT_ST2        0x00000400
+#       define RADEON_SE_VTX_FMT_Q2         0x00000800
+#       define RADEON_SE_VTX_FMT_ST3        0x00001000
+#       define RADEON_SE_VTX_FMT_Q3         0x00002000
+#       define RADEON_SE_VTX_FMT_Q0         0x00004000
+#       define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK  0x00038000
+#       define RADEON_SE_VTX_FMT_N0         0x00040000
+#       define RADEON_SE_VTX_FMT_XY1        0x08000000
+#       define RADEON_SE_VTX_FMT_Z1         0x10000000
+#       define RADEON_SE_VTX_FMT_W1         0x20000000
+#       define RADEON_SE_VTX_FMT_N1         0x40000000
+#       define RADEON_SE_VTX_FMT_Z          0x80000000
+
+#define RADEON_SE_VF_CNTL                             0x2084
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST         1
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST          2
+#       define RADEON_VF_PRIM_TYPE_LINE_STRIP         3
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST      4
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN       5
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP     6
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG      7
+#       define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST     8
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST_3       9
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST_3        10
+#       define RADEON_VF_PRIM_TYPE_SPIRIT_LIST        11
+#       define RADEON_VF_PRIM_TYPE_LINE_LOOP          12
+#       define RADEON_VF_PRIM_TYPE_QUAD_LIST          13
+#       define RADEON_VF_PRIM_TYPE_QUAD_STRIP         14
+#       define RADEON_VF_PRIM_TYPE_POLYGON            15
+#       define RADEON_VF_PRIM_WALK_STATE              (0<<4)
+#       define RADEON_VF_PRIM_WALK_INDEX              (1<<4)
+#       define RADEON_VF_PRIM_WALK_LIST               (2<<4)
+#       define RADEON_VF_PRIM_WALK_DATA               (3<<4)
+#       define RADEON_VF_COLOR_ORDER_RGBA             (1<<6)
+#       define RADEON_VF_RADEON_MODE                  (1<<8)
+#       define RADEON_VF_TCL_OUTPUT_CTL_ENA           (1<<9)
+#       define RADEON_VF_PROG_STREAM_ENA              (1<<10)
+#       define RADEON_VF_INDEX_SIZE_SHIFT             11
+#       define RADEON_VF_NUM_VERTICES_SHIFT           16
+
+#define RADEON_SE_PORT_DATA0			0x2000
+
+#define R200_SE_VAP_CNTL			0x2080
+#       define R200_VAP_TCL_ENABLE		0x00000001
+#       define R200_VAP_SINGLE_BUF_STATE_ENABLE	0x00000010
+#       define R200_VAP_FORCE_W_TO_ONE		0x00010000
+#       define R200_VAP_D3D_TEX_DEFAULT		0x00020000
+#       define R200_VAP_VF_MAX_VTX_NUM__SHIFT	18
+#       define R200_VAP_VF_MAX_VTX_NUM		(9 << 18)
+#       define R200_VAP_DX_CLIP_SPACE_DEF	0x00400000
+#define R200_VF_MAX_VTX_INDX			0x210c
+#define R200_VF_MIN_VTX_INDX			0x2110
+#define R200_SE_VTE_CNTL			0x20b0
+#       define R200_VPORT_X_SCALE_ENA			0x00000001
+#       define R200_VPORT_X_OFFSET_ENA			0x00000002
+#       define R200_VPORT_Y_SCALE_ENA			0x00000004
+#       define R200_VPORT_Y_OFFSET_ENA			0x00000008
+#       define R200_VPORT_Z_SCALE_ENA			0x00000010
+#       define R200_VPORT_Z_OFFSET_ENA			0x00000020
+#       define R200_VTX_XY_FMT				0x00000100
+#       define R200_VTX_Z_FMT				0x00000200
+#       define R200_VTX_W0_FMT				0x00000400
+#       define R200_VTX_W0_NORMALIZE			0x00000800
+#       define R200_VTX_ST_DENORMALIZED		0x00001000
+#define R200_SE_VAP_CNTL_STATUS			0x2140
+#       define R200_VC_NO_SWAP			(0 << 0)
+#       define R200_VC_16BIT_SWAP		(1 << 0)
+#       define R200_VC_32BIT_SWAP		(2 << 0)
+#define R200_PP_TXFILTER_0			0x2c00
+#define R200_PP_TXFILTER_1			0x2c20
+#define R200_PP_TXFILTER_2			0x2c40
+#define R200_PP_TXFILTER_3			0x2c60
+#define R200_PP_TXFILTER_4			0x2c80
+#define R200_PP_TXFILTER_5			0x2ca0
+#       define R200_MAG_FILTER_NEAREST		(0  <<  0)
+#       define R200_MAG_FILTER_LINEAR		(1  <<  0)
+#       define R200_MAG_FILTER_MASK		(1  <<  0)
+#       define R200_MIN_FILTER_NEAREST		(0  <<  1)
+#       define R200_MIN_FILTER_LINEAR		(1  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST	(8  <<  1)
+#       define R200_MIN_FILTER_ANISO_LINEAR	(9  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 <<  1)
+#       define R200_MIN_FILTER_MASK		(15 <<  1)
+#       define R200_MAX_ANISO_1_TO_1		(0  <<  5)
+#       define R200_MAX_ANISO_2_TO_1		(1  <<  5)
+#       define R200_MAX_ANISO_4_TO_1		(2  <<  5)
+#       define R200_MAX_ANISO_8_TO_1		(3  <<  5)
+#       define R200_MAX_ANISO_16_TO_1		(4  <<  5)
+#       define R200_MAX_ANISO_MASK		(7  <<  5)
+#       define R200_MAX_MIP_LEVEL_MASK		(0x0f << 16)
+#       define R200_MAX_MIP_LEVEL_SHIFT		16
+#       define R200_YUV_TO_RGB			(1  << 20)
+#       define R200_YUV_TEMPERATURE_COOL	(0  << 21)
+#       define R200_YUV_TEMPERATURE_HOT		(1  << 21)
+#       define R200_YUV_TEMPERATURE_MASK	(1  << 21)
+#       define R200_WRAPEN_S			(1  << 22)
+#       define R200_CLAMP_S_WRAP		(0  << 23)
+#       define R200_CLAMP_S_MIRROR		(1  << 23)
+#       define R200_CLAMP_S_CLAMP_LAST		(2  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_LAST	(3  << 23)
+#       define R200_CLAMP_S_CLAMP_BORDER	(4  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_BORDER	(5  << 23)
+#       define R200_CLAMP_S_CLAMP_GL		(6  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_GL	(7  << 23)
+#       define R200_CLAMP_S_MASK		(7  << 23)
+#       define R200_WRAPEN_T			(1  << 26)
+#       define R200_CLAMP_T_WRAP		(0  << 27)
+#       define R200_CLAMP_T_MIRROR		(1  << 27)
+#       define R200_CLAMP_T_CLAMP_LAST		(2  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_LAST	(3  << 27)
+#       define R200_CLAMP_T_CLAMP_BORDER	(4  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_BORDER	(5  << 27)
+#       define R200_CLAMP_T_CLAMP_GL		(6  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_GL	(7  << 27)
+#       define R200_CLAMP_T_MASK		(7  << 27)
+#       define R200_KILL_LT_ZERO		(1  << 30)
+#       define R200_BORDER_MODE_OGL		(0  << 31)
+#       define R200_BORDER_MODE_D3D		(1  << 31)
+#define R200_PP_TXFORMAT_0			0x2c04
+#define R200_PP_TXFORMAT_1			0x2c24
+#define R200_PP_TXFORMAT_2			0x2c44
+#define R200_PP_TXFORMAT_3			0x2c64
+#define R200_PP_TXFORMAT_4			0x2c84
+#define R200_PP_TXFORMAT_5			0x2ca4
+#       define R200_TXFORMAT_I8			(0 << 0)
+#       define R200_TXFORMAT_AI88		(1 << 0)
+#       define R200_TXFORMAT_RGB332		(2 << 0)
+#       define R200_TXFORMAT_ARGB1555		(3 << 0)
+#       define R200_TXFORMAT_RGB565		(4 << 0)
+#       define R200_TXFORMAT_ARGB4444		(5 << 0)
+#       define R200_TXFORMAT_ARGB8888		(6 << 0)
+#       define R200_TXFORMAT_RGBA8888		(7 << 0)
+#       define R200_TXFORMAT_Y8			(8 << 0)
+#       define R200_TXFORMAT_AVYU4444		(9 << 0)
+#       define R200_TXFORMAT_VYUY422		(10 << 0)
+#       define R200_TXFORMAT_YVYU422		(11 << 0)
+#       define R200_TXFORMAT_DXT1		(12 << 0)
+#       define R200_TXFORMAT_DXT23		(14 << 0)
+#       define R200_TXFORMAT_DXT45		(15 << 0)
+#       define R200_TXFORMAT_DVDU88		(18 << 0)
+#       define R200_TXFORMAT_LDVDU655		(19 << 0)
+#       define R200_TXFORMAT_LDVDU8888		(20 << 0)
+#       define R200_TXFORMAT_GR1616		(21 << 0)
+#       define R200_TXFORMAT_ABGR8888		(22 << 0)
+#       define R200_TXFORMAT_BGR111110		(23 << 0)
+#       define R200_TXFORMAT_FORMAT_MASK	(31 <<	0)
+#       define R200_TXFORMAT_FORMAT_SHIFT	0
+#       define R200_TXFORMAT_ALPHA_IN_MAP	(1 << 6)
+#       define R200_TXFORMAT_NON_POWER2		(1 << 7)
+#       define R200_TXFORMAT_WIDTH_MASK		(15 <<	8)
+#       define R200_TXFORMAT_WIDTH_SHIFT	8
+#       define R200_TXFORMAT_HEIGHT_MASK	(15 << 12)
+#       define R200_TXFORMAT_HEIGHT_SHIFT	12
+#       define R200_TXFORMAT_F5_WIDTH_MASK	(15 << 16)	/* cube face 5 */
+#       define R200_TXFORMAT_F5_WIDTH_SHIFT	16
+#       define R200_TXFORMAT_F5_HEIGHT_MASK	(15 << 20)
+#       define R200_TXFORMAT_F5_HEIGHT_SHIFT	20
+#       define R200_TXFORMAT_ST_ROUTE_STQ0	(0 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ1	(1 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ2	(2 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ3	(3 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ4	(4 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ5	(5 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_MASK	(7 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_SHIFT	24
+#       define R200_TXFORMAT_LOOKUP_DISABLE	(1 << 27)
+#       define R200_TXFORMAT_ALPHA_MASK_ENABLE	(1 << 28)
+#       define R200_TXFORMAT_CHROMA_KEY_ENABLE	(1 << 29)
+#       define R200_TXFORMAT_CUBIC_MAP_ENABLE		(1 << 30)
+#define R200_PP_TXFORMAT_X_0                    0x2c08
+#define R200_PP_TXFORMAT_X_1                    0x2c28
+#define R200_PP_TXFORMAT_X_2                    0x2c48
+#define R200_PP_TXFORMAT_X_3                    0x2c68
+#define R200_PP_TXFORMAT_X_4                    0x2c88
+#define R200_PP_TXFORMAT_X_5                    0x2ca8
+
+#define R200_PP_TXSIZE_0			0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1			0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2			0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3			0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4			0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5			0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0                       0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1			0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2			0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3			0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4			0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5			0x2cb0 /* NPOT only */
+
+#define R200_PP_CUBIC_FACES_0			0x2c18
+#define R200_PP_CUBIC_FACES_1			0x2c38
+#define R200_PP_CUBIC_FACES_2			0x2c58
+#define R200_PP_CUBIC_FACES_3			0x2c78
+#define R200_PP_CUBIC_FACES_4			0x2c98
+#define R200_PP_CUBIC_FACES_5			0x2cb8
+
+#define R200_PP_TXOFFSET_0			0x2d00
+#       define R200_TXO_ENDIAN_NO_SWAP		(0 << 0)
+#       define R200_TXO_ENDIAN_BYTE_SWAP	(1 << 0)
+#       define R200_TXO_ENDIAN_WORD_SWAP	(2 << 0)
+#       define R200_TXO_ENDIAN_HALFDW_SWAP	(3 << 0)
+#       define R200_TXO_MACRO_LINEAR		(0 << 2)
+#       define R200_TXO_MACRO_TILE		(1 << 2)
+#       define R200_TXO_MICRO_LINEAR		(0 << 3)
+#       define R200_TXO_MICRO_TILE		(1 << 3)
+#       define R200_TXO_OFFSET_MASK		0xffffffe0
+#       define R200_TXO_OFFSET_SHIFT		5
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+
+#define R200_PP_TXOFFSET_1			0x2d18
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+
+#define R200_PP_TXOFFSET_2			0x2d30
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+
+#define R200_PP_TXOFFSET_3			0x2d48
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_TXOFFSET_4			0x2d60
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_TXOFFSET_5			0x2d78
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_PP_TFACTOR_0			0x2ee0
+#define R200_PP_TFACTOR_1			0x2ee4
+#define R200_PP_TFACTOR_2			0x2ee8
+#define R200_PP_TFACTOR_3			0x2eec
+#define R200_PP_TFACTOR_4			0x2ef0
+#define R200_PP_TFACTOR_5			0x2ef4
+
+#define R200_PP_TXCBLEND_0			0x2f00
+#       define R200_TXC_ARG_A_ZERO		(0)
+#       define R200_TXC_ARG_A_CURRENT_COLOR	(2)
+#       define R200_TXC_ARG_A_CURRENT_ALPHA	(3)
+#       define R200_TXC_ARG_A_DIFFUSE_COLOR	(4)
+#       define R200_TXC_ARG_A_DIFFUSE_ALPHA	(5)
+#       define R200_TXC_ARG_A_SPECULAR_COLOR	(6)
+#       define R200_TXC_ARG_A_SPECULAR_ALPHA	(7)
+#       define R200_TXC_ARG_A_TFACTOR_COLOR	(8)
+#       define R200_TXC_ARG_A_TFACTOR_ALPHA	(9)
+#       define R200_TXC_ARG_A_R0_COLOR		(10)
+#       define R200_TXC_ARG_A_R0_ALPHA		(11)
+#       define R200_TXC_ARG_A_R1_COLOR		(12)
+#       define R200_TXC_ARG_A_R1_ALPHA		(13)
+#       define R200_TXC_ARG_A_R2_COLOR		(14)
+#       define R200_TXC_ARG_A_R2_ALPHA		(15)
+#       define R200_TXC_ARG_A_R3_COLOR		(16)
+#       define R200_TXC_ARG_A_R3_ALPHA		(17)
+#       define R200_TXC_ARG_A_R4_COLOR		(18)
+#       define R200_TXC_ARG_A_R4_ALPHA		(19)
+#       define R200_TXC_ARG_A_R5_COLOR		(20)
+#       define R200_TXC_ARG_A_R5_ALPHA		(21)
+#       define R200_TXC_ARG_A_TFACTOR1_COLOR	(26)
+#       define R200_TXC_ARG_A_TFACTOR1_ALPHA	(27)
+#       define R200_TXC_ARG_A_MASK		(31 << 0)
+#       define R200_TXC_ARG_A_SHIFT		0
+#       define R200_TXC_ARG_B_ZERO		(0 << 5)
+#       define R200_TXC_ARG_B_CURRENT_COLOR	(2 << 5)
+#       define R200_TXC_ARG_B_CURRENT_ALPHA	(3 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_COLOR	(4 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_ALPHA	(5 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_COLOR	(6 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_ALPHA	(7 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_COLOR	(8 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_ALPHA	(9 << 5)
+#       define R200_TXC_ARG_B_R0_COLOR		(10 << 5)
+#       define R200_TXC_ARG_B_R0_ALPHA		(11 << 5)
+#       define R200_TXC_ARG_B_R1_COLOR		(12 << 5)
+#       define R200_TXC_ARG_B_R1_ALPHA		(13 << 5)
+#       define R200_TXC_ARG_B_R2_COLOR		(14 << 5)
+#       define R200_TXC_ARG_B_R2_ALPHA		(15 << 5)
+#       define R200_TXC_ARG_B_R3_COLOR		(16 << 5)
+#       define R200_TXC_ARG_B_R3_ALPHA		(17 << 5)
+#       define R200_TXC_ARG_B_R4_COLOR		(18 << 5)
+#       define R200_TXC_ARG_B_R4_ALPHA		(19 << 5)
+#       define R200_TXC_ARG_B_R5_COLOR		(20 << 5)
+#       define R200_TXC_ARG_B_R5_ALPHA		(21 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_COLOR	(26 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_ALPHA	(27 << 5)
+#       define R200_TXC_ARG_B_MASK		(31 << 5)
+#       define R200_TXC_ARG_B_SHIFT		5
+#       define R200_TXC_ARG_C_ZERO		(0 << 10)
+#       define R200_TXC_ARG_C_CURRENT_COLOR	(2 << 10)
+#       define R200_TXC_ARG_C_CURRENT_ALPHA	(3 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_COLOR	(4 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_ALPHA	(5 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_COLOR	(6 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_ALPHA	(7 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_COLOR	(8 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_ALPHA	(9 << 10)
+#       define R200_TXC_ARG_C_R0_COLOR		(10 << 10)
+#       define R200_TXC_ARG_C_R0_ALPHA		(11 << 10)
+#       define R200_TXC_ARG_C_R1_COLOR		(12 << 10)
+#       define R200_TXC_ARG_C_R1_ALPHA		(13 << 10)
+#       define R200_TXC_ARG_C_R2_COLOR		(14 << 10)
+#       define R200_TXC_ARG_C_R2_ALPHA		(15 << 10)
+#       define R200_TXC_ARG_C_R3_COLOR		(16 << 10)
+#       define R200_TXC_ARG_C_R3_ALPHA		(17 << 10)
+#       define R200_TXC_ARG_C_R4_COLOR		(18 << 10)
+#       define R200_TXC_ARG_C_R4_ALPHA		(19 << 10)
+#       define R200_TXC_ARG_C_R5_COLOR		(20 << 10)
+#       define R200_TXC_ARG_C_R5_ALPHA		(21 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_COLOR	(26 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_ALPHA	(27 << 10)
+#       define R200_TXC_ARG_C_MASK		(31 << 10)
+#       define R200_TXC_ARG_C_SHIFT		10
+#       define R200_TXC_COMP_ARG_A		(1 << 16)
+#       define R200_TXC_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXC_BIAS_ARG_A		(1 << 17)
+#       define R200_TXC_SCALE_ARG_A		(1 << 18)
+#       define R200_TXC_NEG_ARG_A		(1 << 19)
+#       define R200_TXC_COMP_ARG_B		(1 << 20)
+#       define R200_TXC_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXC_BIAS_ARG_B		(1 << 21)
+#       define R200_TXC_SCALE_ARG_B		(1 << 22)
+#       define R200_TXC_NEG_ARG_B		(1 << 23)
+#       define R200_TXC_COMP_ARG_C		(1 << 24)
+#       define R200_TXC_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXC_BIAS_ARG_C		(1 << 25)
+#       define R200_TXC_SCALE_ARG_C		(1 << 26)
+#       define R200_TXC_NEG_ARG_C		(1 << 27)
+#       define R200_TXC_OP_MADD			(0 << 28)
+#       define R200_TXC_OP_CND0			(2 << 28)
+#       define R200_TXC_OP_LERP			(3 << 28)
+#       define R200_TXC_OP_DOT3			(4 << 28)
+#       define R200_TXC_OP_DOT4			(5 << 28)
+#       define R200_TXC_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXC_OP_DOT2_ADD		(7 << 28)
+#       define R200_TXC_OP_MASK			(7 << 28)
+#define R200_PP_TXCBLEND2_0		0x2f04
+#       define R200_TXC_TFACTOR_SEL_SHIFT	0
+#       define R200_TXC_TFACTOR_SEL_MASK	0x7
+#       define R200_TXC_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXC_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXC_SCALE_SHIFT		8
+#       define R200_TXC_SCALE_MASK		(7 << 8)
+#       define R200_TXC_SCALE_1X		(0 << 8)
+#       define R200_TXC_SCALE_2X		(1 << 8)
+#       define R200_TXC_SCALE_4X		(2 << 8)
+#       define R200_TXC_SCALE_8X		(3 << 8)
+#       define R200_TXC_SCALE_INV2		(5 << 8)
+#       define R200_TXC_SCALE_INV4		(6 << 8)
+#       define R200_TXC_SCALE_INV8		(7 << 8)
+#       define R200_TXC_CLAMP_SHIFT		12
+#       define R200_TXC_CLAMP_MASK		(3 << 12)
+#       define R200_TXC_CLAMP_WRAP		(0 << 12)
+#       define R200_TXC_CLAMP_0_1		(1 << 12)
+#       define R200_TXC_CLAMP_8_8		(2 << 12)
+#       define R200_TXC_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXC_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXC_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXC_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXC_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXC_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXC_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXC_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXC_OUTPUT_MASK_MASK	(7 << 20)
+#       define R200_TXC_OUTPUT_MASK_RGB		(0 << 20)
+#       define R200_TXC_OUTPUT_MASK_RG		(1 << 20)
+#       define R200_TXC_OUTPUT_MASK_RB		(2 << 20)
+#       define R200_TXC_OUTPUT_MASK_R		(3 << 20)
+#       define R200_TXC_OUTPUT_MASK_GB		(4 << 20)
+#       define R200_TXC_OUTPUT_MASK_G		(5 << 20)
+#       define R200_TXC_OUTPUT_MASK_B		(6 << 20)
+#       define R200_TXC_OUTPUT_MASK_NONE	(7 << 20)
+#       define R200_TXC_REPL_NORMAL		0
+#       define R200_TXC_REPL_RED		1
+#       define R200_TXC_REPL_GREEN		2
+#       define R200_TXC_REPL_BLUE		3
+#       define R200_TXC_REPL_ARG_A_SHIFT	26
+#       define R200_TXC_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXC_REPL_ARG_B_SHIFT	28
+#       define R200_TXC_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXC_REPL_ARG_C_SHIFT	30
+#       define R200_TXC_REPL_ARG_C_MASK		(3 << 30)
+#define R200_PP_TXABLEND_0			0x2f08
+#       define R200_TXA_ARG_A_ZERO		(0)
+#       define R200_TXA_ARG_A_CURRENT_ALPHA	(2) /* guess */
+#       define R200_TXA_ARG_A_CURRENT_BLUE	(3) /* guess */
+#       define R200_TXA_ARG_A_DIFFUSE_ALPHA	(4)
+#       define R200_TXA_ARG_A_DIFFUSE_BLUE	(5)
+#       define R200_TXA_ARG_A_SPECULAR_ALPHA	(6)
+#       define R200_TXA_ARG_A_SPECULAR_BLUE	(7)
+#       define R200_TXA_ARG_A_TFACTOR_ALPHA	(8)
+#       define R200_TXA_ARG_A_TFACTOR_BLUE	(9)
+#       define R200_TXA_ARG_A_R0_ALPHA		(10)
+#       define R200_TXA_ARG_A_R0_BLUE		(11)
+#       define R200_TXA_ARG_A_R1_ALPHA		(12)
+#       define R200_TXA_ARG_A_R1_BLUE		(13)
+#       define R200_TXA_ARG_A_R2_ALPHA		(14)
+#       define R200_TXA_ARG_A_R2_BLUE		(15)
+#       define R200_TXA_ARG_A_R3_ALPHA		(16)
+#       define R200_TXA_ARG_A_R3_BLUE		(17)
+#       define R200_TXA_ARG_A_R4_ALPHA		(18)
+#       define R200_TXA_ARG_A_R4_BLUE		(19)
+#       define R200_TXA_ARG_A_R5_ALPHA		(20)
+#       define R200_TXA_ARG_A_R5_BLUE		(21)
+#       define R200_TXA_ARG_A_TFACTOR1_ALPHA	(26)
+#       define R200_TXA_ARG_A_TFACTOR1_BLUE	(27)
+#       define R200_TXA_ARG_A_MASK		(31 << 0)
+#       define R200_TXA_ARG_A_SHIFT		0
+#       define R200_TXA_ARG_B_ZERO		(0 << 5)
+#       define R200_TXA_ARG_B_CURRENT_ALPHA	(2 << 5) /* guess */
+#       define R200_TXA_ARG_B_CURRENT_BLUE	(3 << 5) /* guess */
+#       define R200_TXA_ARG_B_DIFFUSE_ALPHA	(4 << 5)
+#       define R200_TXA_ARG_B_DIFFUSE_BLUE	(5 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_ALPHA	(6 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_BLUE	(7 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_ALPHA	(8 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_BLUE	(9 << 5)
+#       define R200_TXA_ARG_B_R0_ALPHA		(10 << 5)
+#       define R200_TXA_ARG_B_R0_BLUE		(11 << 5)
+#       define R200_TXA_ARG_B_R1_ALPHA		(12 << 5)
+#       define R200_TXA_ARG_B_R1_BLUE		(13 << 5)
+#       define R200_TXA_ARG_B_R2_ALPHA		(14 << 5)
+#       define R200_TXA_ARG_B_R2_BLUE		(15 << 5)
+#       define R200_TXA_ARG_B_R3_ALPHA		(16 << 5)
+#       define R200_TXA_ARG_B_R3_BLUE		(17 << 5)
+#       define R200_TXA_ARG_B_R4_ALPHA		(18 << 5)
+#       define R200_TXA_ARG_B_R4_BLUE		(19 << 5)
+#       define R200_TXA_ARG_B_R5_ALPHA		(20 << 5)
+#       define R200_TXA_ARG_B_R5_BLUE		(21 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_ALPHA	(26 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_BLUE	(27 << 5)
+#       define R200_TXA_ARG_B_MASK		(31 << 5)
+#       define R200_TXA_ARG_B_SHIFT			5
+#       define R200_TXA_ARG_C_ZERO		(0 << 10)
+#       define R200_TXA_ARG_C_CURRENT_ALPHA	(2 << 10) /* guess */
+#       define R200_TXA_ARG_C_CURRENT_BLUE	(3 << 10) /* guess */
+#       define R200_TXA_ARG_C_DIFFUSE_ALPHA	(4 << 10)
+#       define R200_TXA_ARG_C_DIFFUSE_BLUE	(5 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_ALPHA	(6 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_BLUE	(7 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_ALPHA	(8 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_BLUE	(9 << 10)
+#       define R200_TXA_ARG_C_R0_ALPHA		(10 << 10)
+#       define R200_TXA_ARG_C_R0_BLUE		(11 << 10)
+#       define R200_TXA_ARG_C_R1_ALPHA		(12 << 10)
+#       define R200_TXA_ARG_C_R1_BLUE		(13 << 10)
+#       define R200_TXA_ARG_C_R2_ALPHA		(14 << 10)
+#       define R200_TXA_ARG_C_R2_BLUE		(15 << 10)
+#       define R200_TXA_ARG_C_R3_ALPHA		(16 << 10)
+#       define R200_TXA_ARG_C_R3_BLUE		(17 << 10)
+#       define R200_TXA_ARG_C_R4_ALPHA		(18 << 10)
+#       define R200_TXA_ARG_C_R4_BLUE		(19 << 10)
+#       define R200_TXA_ARG_C_R5_ALPHA		(20 << 10)
+#       define R200_TXA_ARG_C_R5_BLUE		(21 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_ALPHA	(26 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_BLUE	(27 << 10)
+#       define R200_TXA_ARG_C_MASK		(31 << 10)
+#       define R200_TXA_ARG_C_SHIFT		10
+#       define R200_TXA_COMP_ARG_A		(1 << 16)
+#       define R200_TXA_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXA_BIAS_ARG_A		(1 << 17)
+#       define R200_TXA_SCALE_ARG_A		(1 << 18)
+#       define R200_TXA_NEG_ARG_A		(1 << 19)
+#       define R200_TXA_COMP_ARG_B		(1 << 20)
+#       define R200_TXA_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXA_BIAS_ARG_B		(1 << 21)
+#       define R200_TXA_SCALE_ARG_B		(1 << 22)
+#       define R200_TXA_NEG_ARG_B		(1 << 23)
+#       define R200_TXA_COMP_ARG_C		(1 << 24)
+#       define R200_TXA_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXA_BIAS_ARG_C		(1 << 25)
+#       define R200_TXA_SCALE_ARG_C		(1 << 26)
+#       define R200_TXA_NEG_ARG_C		(1 << 27)
+#       define R200_TXA_OP_MADD			(0 << 28)
+#       define R200_TXA_OP_CND0			(2 << 28)
+#       define R200_TXA_OP_LERP			(3 << 28)
+#       define R200_TXA_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXA_OP_MASK			(7 << 28)
+#define R200_PP_TXABLEND2_0			0x2f0c
+#       define R200_TXA_TFACTOR_SEL_SHIFT	0
+#       define R200_TXA_TFACTOR_SEL_MASK	0x7
+#       define R200_TXA_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXA_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXA_SCALE_SHIFT		8
+#       define R200_TXA_SCALE_MASK		(7 << 8)
+#       define R200_TXA_SCALE_1X		(0 << 8)
+#       define R200_TXA_SCALE_2X		(1 << 8)
+#       define R200_TXA_SCALE_4X		(2 << 8)
+#       define R200_TXA_SCALE_8X		(3 << 8)
+#       define R200_TXA_SCALE_INV2		(5 << 8)
+#       define R200_TXA_SCALE_INV4		(6 << 8)
+#       define R200_TXA_SCALE_INV8		(7 << 8)
+#       define R200_TXA_CLAMP_SHIFT		12
+#       define R200_TXA_CLAMP_MASK		(3 << 12)
+#       define R200_TXA_CLAMP_WRAP		(0 << 12)
+#       define R200_TXA_CLAMP_0_1		(1 << 12)
+#       define R200_TXA_CLAMP_8_8		(2 << 12)
+#       define R200_TXA_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXA_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXA_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXA_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXA_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXA_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXA_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXA_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXA_DOT_ALPHA		(1 << 20)
+#       define R200_TXA_REPL_NORMAL		0
+#       define R200_TXA_REPL_RED		1
+#       define R200_TXA_REPL_GREEN		2
+#       define R200_TXA_REPL_ARG_A_SHIFT	26
+#       define R200_TXA_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXA_REPL_ARG_B_SHIFT	28
+#       define R200_TXA_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXA_REPL_ARG_C_SHIFT	30
+#       define R200_TXA_REPL_ARG_C_MASK		(3 << 30)
+
+#define R200_SE_VTX_FMT_0			0x2088
+#       define R200_VTX_XY			0 /* always have xy */
+#       define R200_VTX_Z0			(1<<0)
+#       define R200_VTX_W0			(1<<1)
+#       define R200_VTX_WEIGHT_COUNT_SHIFT	(2)
+#       define R200_VTX_PV_MATRIX_SEL		(1<<5)
+#       define R200_VTX_N0			(1<<6)
+#       define R200_VTX_POINT_SIZE		(1<<7)
+#       define R200_VTX_DISCRETE_FOG		(1<<8)
+#       define R200_VTX_SHININESS_0		(1<<9)
+#       define R200_VTX_SHININESS_1		(1<<10)
+#       define   R200_VTX_COLOR_NOT_PRESENT	0
+#       define   R200_VTX_PK_RGBA		1
+#       define   R200_VTX_FP_RGB		2
+#       define   R200_VTX_FP_RGBA		3
+#       define   R200_VTX_COLOR_MASK		3
+#       define R200_VTX_COLOR_0_SHIFT		11
+#       define R200_VTX_COLOR_1_SHIFT		13
+#       define R200_VTX_COLOR_2_SHIFT		15
+#       define R200_VTX_COLOR_3_SHIFT		17
+#       define R200_VTX_COLOR_4_SHIFT		19
+#       define R200_VTX_COLOR_5_SHIFT		21
+#       define R200_VTX_COLOR_6_SHIFT		23
+#       define R200_VTX_COLOR_7_SHIFT		25
+#       define R200_VTX_XY1			(1<<28)
+#       define R200_VTX_Z1			(1<<29)
+#       define R200_VTX_W1			(1<<30)
+#       define R200_VTX_N1			(1<<31)
+#define R200_SE_VTX_FMT_1			0x208c
+#       define R200_VTX_TEX0_COMP_CNT_SHIFT	0
+#       define R200_VTX_TEX1_COMP_CNT_SHIFT	3
+#       define R200_VTX_TEX2_COMP_CNT_SHIFT	6
+#       define R200_VTX_TEX3_COMP_CNT_SHIFT	9
+#       define R200_VTX_TEX4_COMP_CNT_SHIFT	12
+#       define R200_VTX_TEX5_COMP_CNT_SHIFT	15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0		0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1		0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL		0x2250
+#       define R200_OUTPUT_XYZW			(1<<0)
+#       define R200_OUTPUT_COLOR_0		(1<<8)
+#       define R200_OUTPUT_COLOR_1		(1<<9)
+#       define R200_OUTPUT_TEX_0		(1<<16)
+#       define R200_OUTPUT_TEX_1		(1<<17)
+#       define R200_OUTPUT_TEX_2		(1<<18)
+#       define R200_OUTPUT_TEX_3		(1<<19)
+#       define R200_OUTPUT_TEX_4		(1<<20)
+#       define R200_OUTPUT_TEX_5		(1<<21)
+#       define R200_OUTPUT_TEX_MASK		(0x3f<<16)
+#       define R200_OUTPUT_DISCRETE_FOG		(1<<24)
+#       define R200_OUTPUT_PT_SIZE		(1<<25)
+#       define R200_FORCE_INORDER_PROC		(1<<31)
+#define R200_PP_CNTL_X				0x2cc4
+#define R200_PP_TXMULTI_CTL_0			0x2c1c
+#define R200_PP_TXMULTI_CTL_1			0x2c3c
+#define R200_PP_TXMULTI_CTL_2			0x2c5c
+#define R200_PP_TXMULTI_CTL_3			0x2c7c
+#define R200_PP_TXMULTI_CTL_4			0x2c9c
+#define R200_PP_TXMULTI_CTL_5			0x2cbc
+#define R200_SE_VTX_STATE_CNTL			0x2180
+#       define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+				/* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR               0x07d4
+#define RADEON_CP_ME_RAM_RADDR              0x07d8
+#define RADEON_CP_ME_RAM_DATAH              0x07dc
+#define RADEON_CP_ME_RAM_DATAL              0x07e0
+
+#define RADEON_CP_RB_BASE                   0x0700
+#define RADEON_CP_RB_CNTL                   0x0704
+#	define RADEON_RB_BUFSZ_SHIFT		0
+#	define RADEON_RB_BUFSZ_MASK		(0x3f << 0)
+#	define RADEON_RB_BLKSZ_SHIFT		8
+#	define RADEON_RB_BLKSZ_MASK		(0x3f << 8)
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_MAX_FETCH_SHIFT		18
+#	define RADEON_MAX_FETCH_MASK		(0x3 << 18)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#	define RADEON_RB_RPTR_WR_ENA		(1 << 31)
+#define RADEON_CP_RB_RPTR_ADDR              0x070c
+#define RADEON_CP_RB_RPTR                   0x0710
+#define RADEON_CP_RB_WPTR                   0x0714
+#define RADEON_CP_RB_RPTR_WR                0x071c
+
+#define RADEON_SCRATCH_UMSK		    0x0770
+#define RADEON_SCRATCH_ADDR		    0x0774
+
+#define R600_CP_RB_BASE                     0xc100
+#define R600_CP_RB_CNTL                     0xc104
+#       define R600_RB_BUFSZ(x)             ((x) << 0)
+#       define R600_RB_BLKSZ(x)             ((x) << 8)
+#       define R600_RB_NO_UPDATE            (1 << 27)
+#       define R600_RB_RPTR_WR_ENA          (1 << 31)
+#define R600_CP_RB_RPTR_WR                  0xc108
+#define R600_CP_RB_RPTR_ADDR                0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI             0xc110
+#define R600_CP_RB_WPTR                     0xc114
+#define R600_CP_RB_WPTR_ADDR                0xc118
+#define R600_CP_RB_WPTR_ADDR_HI             0xc11c
+#define R600_CP_RB_RPTR                     0x8700
+#define R600_CP_RB_WPTR_DELAY               0x8704
+
+#define RADEON_CP_IB_BASE                   0x0738
+#define RADEON_CP_IB_BUFSZ                  0x073c
+
+#define RADEON_CP_CSQ_CNTL                  0x0740
+#       define RADEON_CSQ_CNT_PRIMARY_MASK     (0xff << 0)
+#       define RADEON_CSQ_PRIDIS_INDDIS        (0    << 28)
+#       define RADEON_CSQ_PRIPIO_INDDIS        (1    << 28)
+#       define RADEON_CSQ_PRIBM_INDDIS         (2    << 28)
+#       define RADEON_CSQ_PRIPIO_INDBM         (3    << 28)
+#       define RADEON_CSQ_PRIBM_INDBM          (4    << 28)
+#       define RADEON_CSQ_PRIPIO_INDPIO        (15   << 28)
+
+#define R300_CP_RESYNC_ADDR                 0x778
+#define R300_CP_RESYNC_DATA                 0x77c
+
+#define RADEON_CP_CSQ_STAT                  0x07f8
+#       define RADEON_CSQ_RPTR_PRIMARY_MASK    (0xff <<  0)
+#       define RADEON_CSQ_WPTR_PRIMARY_MASK    (0xff <<  8)
+#       define RADEON_CSQ_RPTR_INDIRECT_MASK   (0xff << 16)
+#       define RADEON_CSQ_WPTR_INDIRECT_MASK   (0xff << 24)
+#define RADEON_CP_CSQ2_STAT                  0x07fc
+#define RADEON_CP_CSQ_ADDR                  0x07f0
+#define RADEON_CP_CSQ_DATA                  0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY          0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT         0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY             0x0718
+#       define RADEON_PRE_WRITE_TIMER_SHIFT    0
+#       define RADEON_PRE_WRITE_LIMIT_SHIFT    23
+#define RADEON_CP_CSQ_MODE		0x0744
+#	define RADEON_INDIRECT2_START_SHIFT	0
+#	define RADEON_INDIRECT2_START_MASK	(0x7f << 0)
+#	define RADEON_INDIRECT1_START_SHIFT	8
+#	define RADEON_INDIRECT1_START_MASK	(0x7f << 8)
+
+#define RADEON_AIC_CNTL                     0x01d0
+#       define RADEON_PCIGART_TRANSLATE_EN     (1 << 0)
+#       define RADEON_DIS_OUT_OF_PCI_GART_ACCESS     (1 << 1)
+#	define RS400_MSI_REARM	                (1 << 3) /* rs400/rs480 */
+#define RADEON_AIC_LO_ADDR                  0x01dc
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_HI_ADDR		0x01e0
+
+
+
+				/* Constants */
+/* #define RADEON_LAST_FRAME_REG               RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG               RADEON_GUI_SCRATCH_REG2 */
+
+
+
+				/* CP packet types */
+#define RADEON_CP_PACKET0                           0x00000000
+#define RADEON_CP_PACKET1                           0x40000000
+#define RADEON_CP_PACKET2                           0x80000000
+#define RADEON_CP_PACKET3                           0xC0000000
+#       define RADEON_CP_PACKET_MASK                0xC0000000
+#       define RADEON_CP_PACKET_COUNT_MASK          0x3fff0000
+#       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
+#       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
+#       define R300_CP_PACKET0_REG_MASK             0x00001fff
+#       define R600_CP_PACKET0_REG_MASK             0x0000ffff
+#       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
+#       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR                0x00008000
+
+#define RADEON_CP_PACKET3_NOP                       0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR                 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN              0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS              0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM     0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE            0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE             0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF              0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD              0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX              0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE              0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2              0xc0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR            0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT                0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT               0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT            0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT         0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE             0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES        0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI          0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI         0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT         0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY                        0x00000000
+#define RADEON_CP_VC_FRMT_W0                        0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR                   0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA                   0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR                   0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC                    0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG                     0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC                    0x00000040
+#define RADEON_CP_VC_FRMT_ST0                       0x00000080
+#define RADEON_CP_VC_FRMT_ST1                       0x00000100
+#define RADEON_CP_VC_FRMT_Q1                        0x00000200
+#define RADEON_CP_VC_FRMT_ST2                       0x00000400
+#define RADEON_CP_VC_FRMT_Q2                        0x00000800
+#define RADEON_CP_VC_FRMT_ST3                       0x00001000
+#define RADEON_CP_VC_FRMT_Q3                        0x00002000
+#define RADEON_CP_VC_FRMT_Q0                        0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK      0x00038000
+#define RADEON_CP_VC_FRMT_N0                        0x00040000
+#define RADEON_CP_VC_FRMT_XY1                       0x08000000
+#define RADEON_CP_VC_FRMT_Z1                        0x10000000
+#define RADEON_CP_VC_FRMT_W1                        0x20000000
+#define RADEON_CP_VC_FRMT_N1                        0x40000000
+#define RADEON_CP_VC_FRMT_Z                         0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE            0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT           0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE            0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP      0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST        0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN         0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP       0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2      0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST       0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST  0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND             0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST            0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING            0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA          0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA          0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE               0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE   0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE       0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE               0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE                0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT                 16
+
+#define RADEON_VS_MATRIX_0_ADDR                   0
+#define RADEON_VS_MATRIX_1_ADDR                   4
+#define RADEON_VS_MATRIX_2_ADDR                   8
+#define RADEON_VS_MATRIX_3_ADDR                  12
+#define RADEON_VS_MATRIX_4_ADDR                  16
+#define RADEON_VS_MATRIX_5_ADDR                  20
+#define RADEON_VS_MATRIX_6_ADDR                  24
+#define RADEON_VS_MATRIX_7_ADDR                  28
+#define RADEON_VS_MATRIX_8_ADDR                  32
+#define RADEON_VS_MATRIX_9_ADDR                  36
+#define RADEON_VS_MATRIX_10_ADDR                 40
+#define RADEON_VS_MATRIX_11_ADDR                 44
+#define RADEON_VS_MATRIX_12_ADDR                 48
+#define RADEON_VS_MATRIX_13_ADDR                 52
+#define RADEON_VS_MATRIX_14_ADDR                 56
+#define RADEON_VS_MATRIX_15_ADDR                 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR             64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR             72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR            80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR              88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR             96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR        104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR          112
+#define RADEON_VS_UCP_ADDR                      116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR           122
+#define RADEON_VS_FOG_PARAM_ADDR                123
+#define RADEON_VS_EYE_VECTOR_ADDR               124
+
+#define RADEON_SS_LIGHT_DCD_ADDR                  0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR        8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR         16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR     24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR        32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR       48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR    49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR       50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR    51
+#define RADEON_SS_SHININESS                      60
+
+#define RADEON_TV_MASTER_CNTL                    0x0800
+#       define RADEON_TV_ASYNC_RST               (1 <<  0)
+#       define RADEON_CRT_ASYNC_RST              (1 <<  1)
+#       define RADEON_RESTART_PHASE_FIX          (1 <<  3)
+#	define RADEON_TV_FIFO_ASYNC_RST		 (1 <<  4)
+#	define RADEON_VIN_ASYNC_RST		 (1 <<  5)
+#	define RADEON_AUD_ASYNC_RST		 (1 <<  6)
+#	define RADEON_DVS_ASYNC_RST		 (1 <<  7)
+#       define RADEON_CRT_FIFO_CE_EN             (1 <<  9)
+#       define RADEON_TV_FIFO_CE_EN              (1 << 10)
+#       define RADEON_RE_SYNC_NOW_SEL_MASK       (3 << 14)
+#       define RADEON_TVCLK_ALWAYS_ONb           (1 << 30)
+#	define RADEON_TV_ON			 (1 << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL               0x0888
+#       define RADEON_Y_RED_EN                   (1 << 0)
+#       define RADEON_C_GRN_EN                   (1 << 1)
+#       define RADEON_CMP_BLU_EN                 (1 << 2)
+#       define RADEON_DAC_DITHER_EN              (1 << 3)
+#       define RADEON_RED_MX_FORCE_DAC_DATA      (6 << 4)
+#       define RADEON_GRN_MX_FORCE_DAC_DATA      (6 << 8)
+#       define RADEON_BLU_MX_FORCE_DAC_DATA      (6 << 12)
+#       define RADEON_TV_FORCE_DAC_DATA_SHIFT    16
+#define RADEON_TV_RGB_CNTL                           0x0804
+#       define RADEON_SWITCH_TO_BLUE		  (1 <<  4)
+#       define RADEON_RGB_DITHER_EN		  (1 <<  5)
+#       define RADEON_RGB_SRC_SEL_MASK		  (3 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC1		  (0 <<  8)
+#       define RADEON_RGB_SRC_SEL_RMX		  (1 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC2		  (2 <<  8)
+#       define RADEON_RGB_CONVERT_BY_PASS	  (1 << 10)
+#       define RADEON_UVRAM_READ_MARGIN_SHIFT	  16
+#       define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT	  20
+#       define RADEON_RGB_ATTEN_SEL(x)            ((x) << 24)
+#       define RADEON_TVOUT_SCALE_EN              (1 << 26)
+#       define RADEON_RGB_ATTEN_VAL(x)            ((x) << 28)
+#define RADEON_TV_SYNC_CNTL                          0x0808
+#       define RADEON_SYNC_OE                     (1 <<  0)
+#       define RADEON_SYNC_OUT                    (1 <<  1)
+#       define RADEON_SYNC_IN                     (1 <<  2)
+#       define RADEON_SYNC_PUB                    (1 <<  3)
+#       define RADEON_SYNC_PD                     (1 <<  4)
+#       define RADEON_TV_SYNC_IO_DRIVE            (1 <<  5)
+#define RADEON_TV_HTOTAL                             0x080c
+#define RADEON_TV_HDISP                              0x0810
+#define RADEON_TV_HSTART                             0x0818
+#define RADEON_TV_HCOUNT                             0x081C
+#define RADEON_TV_VTOTAL                             0x0820
+#define RADEON_TV_VDISP                              0x0824
+#define RADEON_TV_VCOUNT                             0x0828
+#define RADEON_TV_FTOTAL                             0x082c
+#define RADEON_TV_FCOUNT                             0x0830
+#define RADEON_TV_FRESTART                           0x0834
+#define RADEON_TV_HRESTART                           0x0838
+#define RADEON_TV_VRESTART                           0x083c
+#define RADEON_TV_HOST_READ_DATA                     0x0840
+#define RADEON_TV_HOST_WRITE_DATA                    0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL                    0x0848
+#	define RADEON_HOST_FIFO_RD		 (1 << 12)
+#	define RADEON_HOST_FIFO_RD_ACK		 (1 << 13)
+#	define RADEON_HOST_FIFO_WT		 (1 << 14)
+#	define RADEON_HOST_FIFO_WT_ACK		 (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1                      0x084c
+#       define RADEON_UV_INC_MASK                0xffff
+#       define RADEON_UV_INC_SHIFT               0
+#       define RADEON_Y_W_EN			 (1 << 24)
+#       define RADEON_RESTART_FIELD              (1 << 29) /* restart on field 0 */
+#       define RADEON_Y_DEL_W_SIG_SHIFT          26
+#define RADEON_TV_TIMING_CNTL                        0x0850
+#       define RADEON_H_INC_MASK                 0xfff
+#       define RADEON_H_INC_SHIFT                0
+#       define RADEON_REQ_Y_FIRST                (1 << 19)
+#       define RADEON_FORCE_BURST_ALWAYS         (1 << 21)
+#       define RADEON_UV_POST_SCALE_BYPASS       (1 << 23)
+#       define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2                      0x0854
+#       define RADEON_DITHER_MODE                (1 <<  0)
+#       define RADEON_Y_OUTPUT_DITHER_EN         (1 <<  1)
+#       define RADEON_UV_OUTPUT_DITHER_EN        (1 <<  2)
+#       define RADEON_UV_TO_BUF_DITHER_EN        (1 <<  3)
+#define RADEON_TV_Y_FALL_CNTL                        0x0858
+#       define RADEON_Y_FALL_PING_PONG           (1 << 16)
+#       define RADEON_Y_COEF_EN                  (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL                        0x085c
+#       define RADEON_Y_RISE_PING_PONG           (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL                   0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL               0x0864
+#	define RADEON_YUPSAMP_EN		 (1 <<  0)
+#	define RADEON_UVUPSAMP_EN		 (1 <<  2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS                0x0868
+#       define RADEON_Y_GAIN_LIMIT_SHIFT         0
+#       define RADEON_UV_GAIN_LIMIT_SHIFT        16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS               0x086c
+#       define RADEON_Y_GAIN_SHIFT               0
+#       define RADEON_UV_GAIN_SHIFT              16
+#define RADEON_TV_MODULATOR_CNTL1                    0x0870
+#	define RADEON_YFLT_EN			 (1 <<  2)
+#	define RADEON_UVFLT_EN			 (1 <<  3)
+#       define RADEON_ALT_PHASE_EN               (1 <<  6)
+#       define RADEON_SYNC_TIP_LEVEL             (1 <<  7)
+#       define RADEON_BLANK_LEVEL_SHIFT          8
+#       define RADEON_SET_UP_LEVEL_SHIFT         16
+#	define RADEON_SLEW_RATE_LIMIT		 (1 << 23)
+#       define RADEON_CY_FILT_BLEND_SHIFT        28
+#define RADEON_TV_MODULATOR_CNTL2                    0x0874
+#       define RADEON_TV_U_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_SHIFT    16
+#define RADEON_TV_CRC_CNTL                           0x0890
+#define RADEON_TV_UV_ADR                             0x08ac
+#	define RADEON_MAX_UV_ADR_MASK		 0x000000ff
+#	define RADEON_MAX_UV_ADR_SHIFT		 0
+#	define RADEON_TABLE1_BOT_ADR_MASK	 0x0000ff00
+#	define RADEON_TABLE1_BOT_ADR_SHIFT	 8
+#	define RADEON_TABLE3_TOP_ADR_MASK	 0x00ff0000
+#	define RADEON_TABLE3_TOP_ADR_SHIFT	 16
+#	define RADEON_HCODE_TABLE_SEL_MASK	 0x06000000
+#	define RADEON_HCODE_TABLE_SEL_SHIFT	 25
+#	define RADEON_VCODE_TABLE_SEL_MASK	 0x18000000
+#	define RADEON_VCODE_TABLE_SEL_SHIFT	 27
+#	define RADEON_TV_MAX_FIFO_ADDR		 0x1a7
+#	define RADEON_TV_MAX_FIFO_ADDR_INTERNAL	 0x1ff
+#define RADEON_TV_PLL_FINE_CNTL			     0x0020	/* PLL */
+#define RADEON_TV_PLL_CNTL                           0x0021	/* PLL */
+#       define RADEON_TV_M0LO_MASK               0xff
+#       define RADEON_TV_M0HI_MASK               0x7
+#       define RADEON_TV_M0HI_SHIFT              18
+#       define RADEON_TV_N0LO_MASK               0x1ff
+#       define RADEON_TV_N0LO_SHIFT              8
+#       define RADEON_TV_N0HI_MASK               0x3
+#       define RADEON_TV_N0HI_SHIFT              21
+#       define RADEON_TV_P_MASK                  0xf
+#       define RADEON_TV_P_SHIFT                 24
+#       define RADEON_TV_SLIP_EN                 (1 << 23)
+#       define RADEON_TV_DTO_EN                  (1 << 28)
+#define RADEON_TV_PLL_CNTL1                          0x0022	/* PLL */
+#       define RADEON_TVPLL_RESET                (1 <<  1)
+#       define RADEON_TVPLL_SLEEP                (1 <<  3)
+#       define RADEON_TVPLL_REFCLK_SEL           (1 <<  4)
+#       define RADEON_TVPCP_SHIFT                8
+#       define RADEON_TVPCP_MASK                 (7 << 8)
+#       define RADEON_TVPVG_SHIFT                11
+#       define RADEON_TVPVG_MASK                 (7 << 11)
+#       define RADEON_TVPDC_SHIFT                14
+#       define RADEON_TVPDC_MASK                 (3 << 14)
+#       define RADEON_TVPLL_TEST_DIS             (1 << 31)
+#       define RADEON_TVCLK_SRC_SEL_TVPLL        (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1			0xe30
+#       define RS400_DISP2_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP2_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP2_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP2_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP2_ALLOW_FID_LEVEL_MASK    0x3ff
+#define RS400_DISP2_REQ_CNTL2			0xe34
+#       define RS400_DISP2_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP2_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP2_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DMIF_MEM_CNTL1			0xe38
+#       define RS400_DISP2_START_ADR_SHIFT      0
+#       define RS400_DISP2_START_ADR_MASK       0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP1_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP1_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DISP1_REQ_CNTL1			0xe3c
+#       define RS400_DISP1_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP1_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP1_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP1_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP1_ALLOW_FID_LEVEL_MASK    0x3ff
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+#define RADEON_PCIE_TX_GART_ERROR	0x18
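+
+/*
+ * Note that the RADEON_PCIE_TX_* offsets above are indices into the
+ * indirect PCIE register space, not MMIO offsets: accesses go through
+ * the RADEON_PCIE_INDEX/RADEON_PCIE_DATA pair, roughly (a sketch of
+ * the driver's WREG32_PCIE() helper):
+ *
+ *	WREG32(RADEON_PCIE_INDEX, reg & rdev->pcie_reg_mask);
+ *	WREG32(RADEON_PCIE_DATA, v);
+ */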
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+
+#define RV530_GB_PIPE_SELECT2           0x4124
+
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
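+
+/*
+ * Worked example: decoding a CP header dword with the helpers above.
+ * RADEON_CP_PACKET3_NOP is 0xC0001000, so
+ *
+ *	RADEON_CP_PACKET_GET_TYPE(0xC0001000)    = (0xC0001000 >> 30) & 3      = 3
+ *	RADEON_CP_PACKET3_GET_OPCODE(0xC0001000) = (0xC0001000 >>  8) & 0xFF   = 0x10
+ *	RADEON_CP_PACKET_GET_COUNT(0xC0001000)   = (0xC0001000 >> 16) & 0x3FFF = 0
+ *
+ * i.e. a type-3 packet with opcode RADEON_PACKET3_NOP and a zero
+ * count field.
+ */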
+
+#define RADEON_VLINE_STAT (1 << 12)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 0000000..84802b2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * Rings
+ * Most engines on the GPU are fed via ring buffers.  Ring
+ * buffers are areas of GPU-accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer) that
+ * determines where the host has written.  When the
+ * pointers are equal, the ring is idle.  When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr.  The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
+ */
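+
+/*
+ * A typical host-side submission using the helpers below (a sketch
+ * only; real callers also emit fences and handle error paths, see
+ * the note after radeon_ring_unlock_undo()):
+ *
+ *	r = radeon_ring_lock(rdev, ring, ndw);
+ *	if (r)
+ *		return r;
+ *	radeon_ring_write(ring, ...);
+ *	radeon_ring_unlock_commit(rdev, ring, false);
+ *
+ * radeon_ring_lock() reserves ndw dwords behind the GPU's rptr,
+ * radeon_ring_write() fills them while advancing the driver's copy
+ * of the wptr, and radeon_ring_unlock_commit() pads the ring and
+ * publishes the new wptr to the hardware.
+ */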
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring)
+{
+	switch (ring->idx) {
+	case RADEON_RING_TYPE_GFX_INDEX:
+	case CAYMAN_RING_TYPE_CP1_INDEX:
+	case CAYMAN_RING_TYPE_CP2_INDEX:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
+
+	/* This works because ring_size is a power of 2 */
+	ring->ring_free_dw = rptr + (ring->ring_size / 4);
+	ring->ring_free_dw -= ring->wptr;
+	ring->ring_free_dw &= ring->ptr_mask;
+	if (!ring->ring_free_dw) {
+		/* this is an empty ring */
+		ring->ring_free_dw = ring->ring_size / 4;
+		/* update lockup info to avoid a false positive */
+		radeon_ring_lockup_update(rdev, ring);
+	}
+}
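+
+/*
+ * For example, assuming a 1 MiB ring (0x40000 dwords, so a ptr_mask
+ * of 0x3ffff), rptr == 0x100 and wptr == 0x180, the computation
+ * above yields
+ *
+ *	(0x100 + 0x40000 - 0x180) & 0x3ffff = 0x3ff80
+ *
+ * free dwords, i.e. everything except the 0x80 dwords still pending
+ * between rptr and wptr.
+ */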
+
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	/* make sure we aren't trying to allocate more space than there is on the ring */
+	if (ndw > (ring->ring_size / 4))
+		return -ENOMEM;
+	/* Align requested size with padding so unlock_commit can
+	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	while (ndw > (ring->ring_free_dw - 1)) {
+		radeon_ring_free_size(rdev, ring);
+		if (ndw < ring->ring_free_dw) {
+			break;
+		}
+		r = radeon_fence_wait_next(rdev, ring->idx);
+		if (r)
+			return r;
+	}
+	ring->count_dw = ndw;
+	ring->wptr_old = ring->wptr;
+	return 0;
+}
+
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->ring_lock);
+	r = radeon_ring_alloc(rdev, ring, ndw);
+	if (r) {
+		mutex_unlock(&rdev->ring_lock);
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_ring_commit - tell the GPU to execute the new commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			bool hdp_flush)
+{
+	/* If we are emitting the HDP flush via the ring buffer, we need to
+	 * do it before padding.
+	 */
+	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
+		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
+	/* We pad to match fetch size */
+	while (ring->wptr & ring->align_mask) {
+		radeon_ring_write(ring, ring->nop);
+	}
+	mb();
+	/* If we are emitting the HDP flush via MMIO, we need to do it after
+	 * all CPU writes to VRAM finished.
+	 */
+	if (hdp_flush && rdev->asic->mmio_hdp_flush)
+		rdev->asic->mmio_hdp_flush(rdev);
+	radeon_ring_set_wptr(rdev, ring);
+}
+
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+			       bool hdp_flush)
+{
+	radeon_ring_commit(rdev, ring, hdp_flush);
+	mutex_unlock(&rdev->ring_lock);
+}
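+
+/*
+ * Editor's illustration (hypothetical helper, not in the original
+ * file): the canonical lock/write/commit pattern built from the
+ * functions above. Nothing reaches the GPU if allocation fails.
+ */
+static inline int ring_emit_nop_sketch(struct radeon_device *rdev,
+				       struct radeon_ring *ring)
+{
+	int r = radeon_ring_lock(rdev, ring, 2);
+
+	if (r)
+		return r;
+	radeon_ring_write(ring, ring->nop);	/* two placeholder packets */
+	radeon_ring_write(ring, ring->nop);
+	radeon_ring_unlock_commit(rdev, ring, false);
+	return 0;
+}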
+
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
+void radeon_ring_undo(struct radeon_ring *ring)
+{
+	ring->wptr = ring->wptr_old;
+}
+
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_undo(ring);
+	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
+void radeon_ring_lockup_update(struct radeon_device *rdev,
+			       struct radeon_ring *ring)
+{
+	atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
+	atomic64_set(&ring->last_activity, jiffies_64);
+}
+
+/**
+ * radeon_ring_test_lockup - check if the ring appears locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Compare the current rptr with the last recorded value; if it has not
+ * advanced within radeon_lockup_timeout msec, report a lockup (all asics).
+ * Returns true if the ring appears locked up, false if not.
+ */
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
+	uint64_t last = atomic64_read(&ring->last_activity);
+	uint64_t elapsed;
+
+	if (rptr != atomic_read(&ring->last_rptr)) {
+		/* ring is still working, no lockup */
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+
+	elapsed = jiffies_to_msecs(jiffies_64 - last);
+	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+		dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
+			ring->idx, elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
+}
+
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ * @data: place to store the pointer to the saved command buffer
+ *
+ * Saves all unprocessed commits from a ring, returns the number of dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data)
+{
+	unsigned size, ptr, i;
+
+	/* just in case lock the ring */
+	mutex_lock(&rdev->ring_lock);
+	*data = NULL;
+
+	if (ring->ring_obj == NULL) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* it doesn't make sense to save anything if all fences are signaled */
+	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* calculate the number of dw on the ring */
+	if (ring->rptr_save_reg)
+		ptr = RREG32(ring->rptr_save_reg);
+	else if (rdev->wb.enabled)
+		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+	else {
+		/* no way to read back the next rptr */
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	size = ring->wptr + (ring->ring_size / 4);
+	size -= ptr;
+	size &= ring->ptr_mask;
+	if (size == 0) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* and then save the content of the ring */
+	*data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+	if (!*data) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+	for (i = 0; i < size; ++i) {
+		(*data)[i] = ring->ring[ptr++];
+		ptr &= ring->ptr_mask;
+	}
+
+	mutex_unlock(&rdev->ring_lock);
+	return size;
+}
+
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restore the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data)
+{
+	int i, r;
+
+	if (!size || !data)
+		return 0;
+
+	/* restore the saved ring content */
+	r = radeon_ring_lock(rdev, ring, size);
+	if (r)
+		return r;
+
+	for (i = 0; i < size; ++i) {
+		radeon_ring_write(ring, data[i]);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	kvfree(data);
+	return 0;
+}
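+
+/*
+ * Editor's illustration (hypothetical): how the backup/restore pair is
+ * meant to bracket a GPU reset: save the unprocessed dwords, reset,
+ * then replay them. radeon_ring_restore() frees the saved buffer.
+ */
+static inline int ring_save_restore_sketch(struct radeon_device *rdev,
+					   struct radeon_ring *ring)
+{
+	uint32_t *data;
+	unsigned size = radeon_ring_backup(rdev, ring, &data);
+
+	/* ... reset and re-initialize the GPU here ... */
+	return radeon_ring_restore(rdev, ring, size, data);
+}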
+
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+		     unsigned rptr_offs, u32 nop)
+{
+	int r;
+
+	ring->ring_size = ring_size;
+	ring->rptr_offs = rptr_offs;
+	ring->nop = nop;
+	/* Allocate ring buffer */
+	if (ring->ring_obj == NULL) {
+		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL,
+				     NULL, &ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(ring->ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+					&ring->gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(ring->ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+			return r;
+		}
+		r = radeon_bo_kmap(ring->ring_obj,
+				       (void **)&ring->ring);
+		radeon_bo_unreserve(ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
+			return r;
+		}
+	}
+	ring->ptr_mask = (ring->ring_size / 4) - 1;
+	ring->ring_free_dw = ring->ring_size / 4;
+	if (rdev->wb.enabled) {
+		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+	}
+	if (radeon_debugfs_ring_init(rdev, ring)) {
+		DRM_ERROR("Failed to register debugfs file for rings !\n");
+	}
+	radeon_ring_lockup_update(rdev, ring);
+	return 0;
+}
+
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+	struct radeon_bo *ring_obj;
+
+	mutex_lock(&rdev->ring_lock);
+	ring_obj = ring->ring_obj;
+	ring->ready = false;
+	ring->ring = NULL;
+	ring->ring_obj = NULL;
+	mutex_unlock(&rdev->ring_lock);
+
+	if (ring_obj) {
+		r = radeon_bo_reserve(ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(ring_obj);
+			radeon_bo_unpin(ring_obj);
+			radeon_bo_unreserve(ring_obj);
+		}
+		radeon_bo_unref(&ring_obj);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ridx = *(int *)node->info_ent->data;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	uint32_t rptr, wptr, rptr_next;
+	unsigned count, i, j;
+
+	radeon_ring_free_size(rdev, ring);
+	count = (ring->ring_size / 4) - ring->ring_free_dw;
+
+	wptr = radeon_ring_get_wptr(rdev, ring);
+	seq_printf(m, "wptr: 0x%08x [%5d]\n",
+		   wptr, wptr);
+
+	rptr = radeon_ring_get_rptr(rdev, ring);
+	seq_printf(m, "rptr: 0x%08x [%5d]\n",
+		   rptr, rptr);
+
+	if (ring->rptr_save_reg) {
+		rptr_next = RREG32(ring->rptr_save_reg);
+		seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
+			   ring->rptr_save_reg, rptr_next, rptr_next);
+	} else
+		rptr_next = ~0;
+
+	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+		   ring->wptr, ring->wptr);
+	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+		   ring->last_semaphore_signal_addr);
+	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
+		   ring->last_semaphore_wait_addr);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+
+	if (!ring->ring)
+		return 0;
+
+	/* print 32 dw before the current rptr as often the last executed
+	 * packet is the root cause of the problem
+	 */
+	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+	for (j = 0; j <= (count + 32); j++) {
+		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+		if (rptr == i)
+			seq_puts(m, " *");
+		if (rptr_next == i)
+			seq_puts(m, " #");
+		seq_puts(m, "\n");
+		i = (i + 1) & ring->ptr_mask;
+	}
+	return 0;
+}
+
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
+static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
+static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
+	{"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
+	{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
+};
+
+#endif
+
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+		int ridx = *(int *)radeon_debugfs_ring_info_list[i].data;
+		int r;
+
+		if (&rdev->ring[ridx] != ring)
+			continue;
+
+		r = radeon_debugfs_add_files(rdev, info, 1);
+		if (r)
+			return r;
+	}
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 0000000..197b157
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole" and always try to allocate
+ * after it. The principle is that in a linear GPU ring progression,
+ * what comes after the last allocation is the oldest bo we allocated
+ * and thus the first one that should no longer be in use by the GPU.
+ *
+ * If that's not the case, we skip over the bo after last to the
+ * closest completed bo, if one exists. If none exists and we are not
+ * asked to block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring,
+ * and return as soon as any of those fences completes.
+ */
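+/*
+ * Editor's illustration (added for clarity, not in the original file):
+ * olist keeps all allocations ordered by offset inside the manager's
+ * single BO, and "hole" marks where the next allocation is tried:
+ *
+ *   0                                                      size
+ *   |-- bo A --|-- bo B --|......hole......|-- bo C --|....|
+ *                         ^
+ *              sa_manager->hole points at bo B; new allocations are
+ *              carved from the gap between bo B and bo C. Freed bos
+ *              stay on the per-ring flist until their fence signals.
+ */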
+#include <drm/drmP.h>
+#include "radeon.h"
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+			      struct radeon_sa_manager *sa_manager,
+			      unsigned size, u32 align, u32 domain, u32 flags)
+{
+	int i, r;
+
+	init_waitqueue_head(&sa_manager->wq);
+	sa_manager->bo = NULL;
+	sa_manager->size = size;
+	sa_manager->domain = domain;
+	sa_manager->align = align;
+	sa_manager->hole = &sa_manager->olist;
+	INIT_LIST_HEAD(&sa_manager->olist);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		INIT_LIST_HEAD(&sa_manager->flist[i]);
+	}
+
+	r = radeon_bo_create(rdev, size, align, true,
+			     domain, flags, NULL, NULL, &sa_manager->bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+		return r;
+	}
+
+	return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (!list_empty(&sa_manager->olist)) {
+		sa_manager->hole = &sa_manager->olist,
+		radeon_sa_bo_try_free(sa_manager);
+		if (!list_empty(&sa_manager->olist)) {
+			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+		}
+	}
+	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+	radeon_bo_unref(&sa_manager->bo);
+	sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	/* map the buffer */
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(sa_manager->bo);
+		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	radeon_bo_unreserve(sa_manager->bo);
+	return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+				 struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (!r) {
+		radeon_bo_kunmap(sa_manager->bo);
+		radeon_bo_unpin(sa_manager->bo);
+		radeon_bo_unreserve(sa_manager->bo);
+	}
+	return r;
+}
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
+	if (sa_manager->hole == &sa_bo->olist) {
+		sa_manager->hole = sa_bo->olist.prev;
+	}
+	list_del_init(&sa_bo->olist);
+	list_del_init(&sa_bo->flist);
+	radeon_fence_unref(&sa_bo->fence);
+	kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (sa_manager->hole->next == &sa_manager->olist)
+		return;
+
+	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+			return;
+		}
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole != &sa_manager->olist) {
+		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+	}
+	return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole->next != &sa_manager->olist) {
+		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+	}
+	return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+				   struct radeon_sa_bo *sa_bo,
+				   unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		soffset += wasted;
+
+		sa_bo->manager = sa_manager;
+		sa_bo->soffset = soffset;
+		sa_bo->eoffset = soffset + size;
+		list_add(&sa_bo->olist, sa_manager->hole);
+		INIT_LIST_HEAD(&sa_bo->flist);
+		sa_manager->hole = &sa_bo->olist;
+		return true;
+	}
+	return false;
+}
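+
+/*
+ * Editor's note (worked example, not in the original source): with the
+ * hole starting at soffset = 100 and align = 16, the code above skips
+ * wasted = (16 - (100 % 16)) % 16 = 12 bytes, so the allocation starts
+ * at offset 112, the next 16-byte boundary.
+ */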
+
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+			    unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!list_empty(&sa_manager->flist[i])) {
+			return true;
+		}
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		return true;
+	}
+
+	return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+				   struct radeon_fence **fences,
+				   unsigned *tries)
+{
+	struct radeon_sa_bo *best_bo = NULL;
+	unsigned i, soffset, best, tmp;
+
+	/* if hole points to the end of the buffer */
+	if (sa_manager->hole->next == &sa_manager->olist) {
+		/* try again with its beginning */
+		sa_manager->hole = &sa_manager->olist;
+		return true;
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	/* to handle wrap around we add sa_manager->size */
+	best = sa_manager->size * 2;
+	/* go over all fence list and try to find the closest sa_bo
+	 * of the current last
+	 */
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_sa_bo *sa_bo;
+
+		if (list_empty(&sa_manager->flist[i])) {
+			continue;
+		}
+
+		sa_bo = list_first_entry(&sa_manager->flist[i],
+					 struct radeon_sa_bo, flist);
+
+		if (!radeon_fence_signaled(sa_bo->fence)) {
+			fences[i] = sa_bo->fence;
+			continue;
+		}
+
+		/* limit the number of tries each ring gets */
+		if (tries[i] > 2) {
+			continue;
+		}
+
+		tmp = sa_bo->soffset;
+		if (tmp < soffset) {
+			/* wrap around, pretend it's after */
+			tmp += sa_manager->size;
+		}
+		tmp -= soffset;
+		if (tmp < best) {
+			/* this sa bo is the closest one */
+			best = tmp;
+			best_bo = sa_bo;
+		}
+	}
+
+	if (best_bo) {
+		++tries[best_bo->fence->ring];
+		sa_manager->hole = best_bo->olist.prev;
+
+		/* we knew that this one is signaled,
+		 * so it's safe to remove it */
+		radeon_sa_bo_remove_locked(best_bo);
+		return true;
+	}
+	return false;
+}
+
+int radeon_sa_bo_new(struct radeon_device *rdev,
+		     struct radeon_sa_manager *sa_manager,
+		     struct radeon_sa_bo **sa_bo,
+		     unsigned size, unsigned align)
+{
+	struct radeon_fence *fences[RADEON_NUM_RINGS];
+	unsigned tries[RADEON_NUM_RINGS];
+	int i, r;
+
+	BUG_ON(align > sa_manager->align);
+	BUG_ON(size > sa_manager->size);
+
+	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+	if ((*sa_bo) == NULL) {
+		return -ENOMEM;
+	}
+	(*sa_bo)->manager = sa_manager;
+	(*sa_bo)->fence = NULL;
+	INIT_LIST_HEAD(&(*sa_bo)->olist);
+	INIT_LIST_HEAD(&(*sa_bo)->flist);
+
+	spin_lock(&sa_manager->wq.lock);
+	do {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			fences[i] = NULL;
+			tries[i] = 0;
+		}
+
+		do {
+			radeon_sa_bo_try_free(sa_manager);
+
+			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+						   size, align)) {
+				spin_unlock(&sa_manager->wq.lock);
+				return 0;
+			}
+
+			/* see if we can skip over some allocations */
+		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_ref(fences[i]);
+
+		spin_unlock(&sa_manager->wq.lock);
+		r = radeon_fence_wait_any(rdev, fences, false);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_unref(&fences[i]);
+		spin_lock(&sa_manager->wq.lock);
+		/* if we have nothing to wait for, block */
+		if (r == -ENOENT) {
+			r = wait_event_interruptible_locked(
+				sa_manager->wq,
+				radeon_sa_event(sa_manager, size, align)
+			);
+		}
+
+	} while (!r);
+
+	spin_unlock(&sa_manager->wq.lock);
+	kfree(*sa_bo);
+	*sa_bo = NULL;
+	return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+		       struct radeon_fence *fence)
+{
+	struct radeon_sa_manager *sa_manager;
+
+	if (sa_bo == NULL || *sa_bo == NULL) {
+		return;
+	}
+
+	sa_manager = (*sa_bo)->manager;
+	spin_lock(&sa_manager->wq.lock);
+	if (fence && !radeon_fence_signaled(fence)) {
+		(*sa_bo)->fence = radeon_fence_ref(fence);
+		list_add_tail(&(*sa_bo)->flist,
+			      &sa_manager->flist[fence->ring]);
+	} else {
+		radeon_sa_bo_remove_locked(*sa_bo);
+	}
+	wake_up_all_locked(&sa_manager->wq);
+	spin_unlock(&sa_manager->wq.lock);
+	*sa_bo = NULL;
+}
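+
+/*
+ * Editor's illustration (hypothetical): the allocate/use/free cycle of
+ * a sub-allocation. Passing the fence keeps the range on the per-ring
+ * free list until the GPU is done with it, so the memory is never
+ * reused too early. Size and alignment values are arbitrary.
+ */
+static inline int sa_bo_usage_sketch(struct radeon_device *rdev,
+				     struct radeon_sa_manager *sa_manager,
+				     struct radeon_fence *fence)
+{
+	struct radeon_sa_bo *sa_bo;
+	int r;
+
+	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 64, 16);
+	if (r)
+		return r;
+	/* ... fill the 64 bytes at radeon_sa_bo_cpu_addr(sa_bo) ... */
+	radeon_sa_bo_free(rdev, &sa_bo, fence);
+	return 0;
+}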
+
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+				  struct seq_file *m)
+{
+	struct radeon_sa_bo *i;
+
+	spin_lock(&sa_manager->wq.lock);
+	list_for_each_entry(i, &sa_manager->olist, olist) {
+		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
+		if (&i->olist == sa_manager->hole) {
+			seq_printf(m, ">");
+		} else {
+			seq_printf(m, " ");
+		}
+		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+			   soffset, eoffset, eoffset - soffset);
+		if (i->fence) {
+			seq_printf(m, " protected by 0x%016llx on ring %d",
+				   i->fence->seq, i->fence->ring);
+		}
+		seq_printf(m, "\n");
+	}
+	spin_unlock(&sa_manager->wq.lock);
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 0000000..b0eb28e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore)
+{
+	int r;
+
+	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
+	if (*semaphore == NULL) {
+		return -ENOMEM;
+	}
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+			     &(*semaphore)->sa_bo, 8, 8);
+	if (r) {
+		kfree(*semaphore);
+		*semaphore = NULL;
+		return r;
+	}
+	(*semaphore)->waiters = 0;
+	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+
+	*((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	return 0;
+}
+
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
+				  struct radeon_semaphore *semaphore)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_signale(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+		--semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
+}
+
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
+				struct radeon_semaphore *semaphore)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_wait(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+		++semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence)
+{
+	if (semaphore == NULL || *semaphore == NULL) {
+		return;
+	}
+	if ((*semaphore)->waiters > 0) {
+		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+			" hardware lockup imminent!\n", *semaphore);
+	}
+	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+	kfree(*semaphore);
+	*semaphore = NULL;
+}
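+
+/*
+ * Editor's illustration (hypothetical sequence, not in the original
+ * file): a semaphore synchronizes two rings by pairing one wait with
+ * one signal; both rings need space allocated before emitting:
+ *
+ *	radeon_semaphore_create(rdev, &sem);
+ *	radeon_semaphore_emit_wait(rdev, consumer_ridx, sem);
+ *	radeon_semaphore_emit_signal(rdev, producer_ridx, sem);
+ *	radeon_semaphore_free(rdev, &sem, fence);
+ */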
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
new file mode 100644
index 0000000..be5d7a3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/**
+ * radeon_sync_create - zero init sync object
+ *
+ * @sync: sync object to initialize
+ *
+ * Just clear the sync object for now.
+ */
+void radeon_sync_create(struct radeon_sync *sync)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+		sync->semaphores[i] = NULL;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		sync->sync_to[i] = NULL;
+
+	sync->last_vm_update = NULL;
+}
+
+/**
+ * radeon_sync_fence - use the semaphore to sync to a fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using the semaphore objects
+ */
+void radeon_sync_fence(struct radeon_sync *sync,
+		       struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = sync->sync_to[fence->ring];
+	sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
+
+	if (fence->is_vm_update) {
+		other = sync->last_vm_update;
+		sync->last_vm_update = radeon_fence_later(fence, other);
+	}
+}
+
+/**
+ * radeon_sync_resv - use the semaphores to sync to a reservation object
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fence
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to the fence using the semaphore objects
+ */
+int radeon_sync_resv(struct radeon_device *rdev,
+		     struct radeon_sync *sync,
+		     struct reservation_object *resv,
+		     bool shared)
+{
+	struct reservation_object_list *flist;
+	struct dma_fence *f;
+	struct radeon_fence *fence;
+	unsigned i;
+	int r = 0;
+
+	/* always sync to the exclusive fence */
+	f = reservation_object_get_excl(resv);
+	fence = f ? to_radeon_fence(f) : NULL;
+	if (fence && fence->rdev == rdev)
+		radeon_sync_fence(sync, fence);
+	else if (f)
+		r = dma_fence_wait(f, true);
+
+	flist = reservation_object_get_list(resv);
+	if (shared || !flist || r)
+		return r;
+
+	for (i = 0; i < flist->shared_count; ++i) {
+		f = rcu_dereference_protected(flist->shared[i],
+					      reservation_object_held(resv));
+		fence = to_radeon_fence(f);
+		if (fence && fence->rdev == rdev)
+			radeon_sync_fence(sync, fence);
+		else
+			r = dma_fence_wait(f, true);
+
+		if (r)
+			break;
+	}
+	return r;
+}
+
+/**
+ * radeon_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
+int radeon_sync_rings(struct radeon_device *rdev,
+		      struct radeon_sync *sync,
+		      int ring)
+{
+	unsigned count = 0;
+	int i, r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_fence *fence = sync->sync_to[i];
+		struct radeon_semaphore *semaphore;
+
+		/* check if we really need to sync */
+		if (!radeon_fence_need_sync(fence, ring))
+			continue;
+
+		/* prevent GPU deadlocks */
+		if (!rdev->ring[i].ready) {
+			dev_err(rdev->dev, "Syncing to a disabled ring!");
+			return -EINVAL;
+		}
+
+		if (count >= RADEON_NUM_SYNCS) {
+			/* not enough room, wait manually */
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+		r = radeon_semaphore_create(rdev, &semaphore);
+		if (r)
+			return r;
+
+		sync->semaphores[count++] = semaphore;
+
+		/* allocate enough space for sync command */
+		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+		if (r)
+			return r;
+
+		/* emit the signal semaphore */
+		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+			/* signaling wasn't successful, wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+
+		/* we assume caller has already allocated space on waiters ring */
+		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+			/* waiting wasn't successful, wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+
+		radeon_ring_commit(rdev, &rdev->ring[i], false);
+		radeon_fence_note_sync(fence, ring);
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_sync_free - free the sync object
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @fence: fence to use for the free
+ *
+ * Free the sync object by freeing all semaphores in it.
+ */
+void radeon_sync_free(struct radeon_device *rdev,
+		      struct radeon_sync *sync,
+		      struct radeon_fence *fence)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+		radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
+}
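+
+/*
+ * Editor's illustration (hypothetical): the usual lifecycle of a sync
+ * object around a command submission. The real callers hold the ring
+ * lock around radeon_sync_rings(); the names here are made up.
+ */
+static inline int sync_usage_sketch(struct radeon_device *rdev,
+				    struct radeon_fence *dependency,
+				    struct radeon_fence *fence, int ring)
+{
+	struct radeon_sync sync;
+	int r;
+
+	radeon_sync_create(&sync);
+	radeon_sync_fence(&sync, dependency);	/* register a dependency */
+	r = radeon_sync_rings(rdev, &sync, ring); /* emit the semaphores */
+	/* ... on success, emit the actual commands and @fence here ... */
+	radeon_sync_free(rdev, &sync, fence);	/* semaphores freed by fence */
+	return r;
+}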
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
new file mode 100644
index 0000000..0c7f228
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Michel Dänzer
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
+
+/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
+{
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
+	uint64_t gtt_addr, vram_addr;
+	unsigned n, size;
+	int i, r, ring;
+
+	switch (flag) {
+	case RADEON_TEST_COPY_DMA:
+		ring = radeon_copy_dma_ring_index(rdev);
+		break;
+	case RADEON_TEST_COPY_BLIT:
+		ring = radeon_copy_blit_ring_index(rdev);
+		break;
+	default:
+		DRM_ERROR("Unknown copy method\n");
+		return;
+	}
+
+	size = 1024 * 1024;
+
+	/* Number of tests =
+	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
+	 */
+	n = rdev->mc.gtt_size - rdev->gart_pin_size;
+	n /= size;
+
+	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
+	if (!gtt_obj) {
+		DRM_ERROR("Failed to allocate %d pointers\n", n);
+		r = 1;
+		goto out_cleanup;
+	}
+
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+			     0, NULL, NULL, &vram_obj);
+	if (r) {
+		DRM_ERROR("Failed to create VRAM object\n");
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_unref;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	if (r) {
+		DRM_ERROR("Failed to pin VRAM object\n");
+		goto out_unres;
+	}
+	for (i = 0; i < n; i++) {
+		void *gtt_map, *vram_map;
+		void **gtt_start, **gtt_end;
+		void **vram_start, **vram_end;
+		struct radeon_fence *fence = NULL;
+
+		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     gtt_obj + i);
+		if (r) {
+			DRM_ERROR("Failed to create GTT object %d\n", i);
+			goto out_lclean;
+		}
+
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_lclean_unref;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		if (r) {
+			DRM_ERROR("Failed to pin GTT object %d\n", i);
+			goto out_lclean_unres;
+		}
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++)
+			*gtt_start = gtt_start;
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
+						size / RADEON_GPU_PAGE_SIZE,
+						vram_obj->tbo.resv);
+		else
+			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
+						 size / RADEON_GPU_PAGE_SIZE,
+						 vram_obj->tbo.resv);
+		if (IS_ERR(fence)) {
+			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+			r = PTR_ERR(fence);
+			goto out_lclean_unpin;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(vram_obj, &vram_map);
+		if (r) {
+			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     vram_start < vram_end;
+		     gtt_start++, vram_start++) {
+			if (*vram_start != gtt_start) {
+				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
+					  "expected 0x%p (GTT/VRAM offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *vram_start, gtt_start,
+					  (unsigned long long)
+					  (gtt_addr - rdev->mc.gtt_start +
+					   (void*)gtt_start - gtt_map),
+					  (unsigned long long)
+					  (vram_addr - rdev->mc.vram_start +
+					   (void*)gtt_start - gtt_map));
+				radeon_bo_kunmap(vram_obj);
+				goto out_lclean_unpin;
+			}
+			*vram_start = vram_start;
+		}
+
+		radeon_bo_kunmap(vram_obj);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
+						size / RADEON_GPU_PAGE_SIZE,
+						vram_obj->tbo.resv);
+		else
+			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
+						 size / RADEON_GPU_PAGE_SIZE,
+						 vram_obj->tbo.resv);
+		if (IS_ERR(fence)) {
+			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+			r = PTR_ERR(fence);
+			goto out_lclean_unpin;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++, vram_start++) {
+			if (*gtt_start != vram_start) {
+				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
+					  "expected 0x%p (VRAM/GTT offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *gtt_start, vram_start,
+					  (unsigned long long)
+					  (vram_addr - rdev->mc.vram_start +
+					   (void*)vram_start - vram_map),
+					  (unsigned long long)
+					  (gtt_addr - rdev->mc.gtt_start +
+					   (void*)vram_start - vram_map));
+				radeon_bo_kunmap(gtt_obj[i]);
+				goto out_lclean_unpin;
+			}
+		}
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
+			 gtt_addr - rdev->mc.gtt_start);
+		continue;
+
+out_lclean_unpin:
+		radeon_bo_unpin(gtt_obj[i]);
+out_lclean_unres:
+		radeon_bo_unreserve(gtt_obj[i]);
+out_lclean_unref:
+		radeon_bo_unref(&gtt_obj[i]);
+out_lclean:
+		for (--i; i >= 0; --i) {
+			radeon_bo_unpin(gtt_obj[i]);
+			radeon_bo_unreserve(gtt_obj[i]);
+			radeon_bo_unref(&gtt_obj[i]);
+		}
+		if (fence && !IS_ERR(fence))
+			radeon_fence_unref(&fence);
+		break;
+	}
+
+	radeon_bo_unpin(vram_obj);
+out_unres:
+	radeon_bo_unreserve(vram_obj);
+out_unref:
+	radeon_bo_unref(&vram_obj);
+out_cleanup:
+	kfree(gtt_obj);
+	if (r) {
+		pr_warn("Error while testing BO move\n");
+	}
+}
+
+void radeon_test_moves(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.dma)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+	if (rdev->asic->copy.blit)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+					     struct radeon_ring *ring,
+					     struct radeon_fence **fence)
+{
+	uint32_t handle = ring->idx ^ 0xdeafbeef;
+	int r;
+
+	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
+		if (r) {
+			DRM_ERROR("Failed to get dummy create msg\n");
+			return r;
+		}
+
+		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
+		if (r) {
+			DRM_ERROR("Failed to get dummy destroy msg\n");
+			return r;
+		}
+
+	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
+		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
+		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
+		if (r) {
+			DRM_ERROR("Failed to get dummy create msg\n");
+			return r;
+		}
+
+		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
+		if (r) {
+			DRM_ERROR("Failed to get dummy destroy msg\n");
+			return r;
+		}
+
+	} else {
+		r = radeon_ring_lock(rdev, ring, 64);
+		if (r) {
+			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+			return r;
+		}
+		r = radeon_fence_emit(rdev, fence, ring->idx);
+		if (r) {
+			DRM_ERROR("Failed to emit fence\n");
+			radeon_ring_unlock_undo(rdev, ring);
+			return r;
+		}
+		radeon_ring_unlock_commit(rdev, ring, false);
+	}
+	return 0;
+}
+
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *ringA,
+			   struct radeon_ring *ringB)
+{
+	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	int r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA, false);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+	if (r)
+		goto out_cleanup;
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA, false);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+	if (r)
+		goto out_cleanup;
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence1)) {
+		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB, false);
+
+	r = radeon_fence_wait(fence1, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence2)) {
+		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB, false);
+
+	r = radeon_fence_wait(fence2, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fence1)
+		radeon_fence_unref(&fence1);
+
+	if (fence2)
+		radeon_fence_unref(&fence2);
+
+	if (r)
+		pr_warn("Error while testing ring sync (%d)\n", r);
+}
+
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
+			    struct radeon_ring *ringA,
+			    struct radeon_ring *ringB,
+			    struct radeon_ring *ringC)
+{
+	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	bool sigA, sigB;
+	int i, r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA, false);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+	if (r)
+		goto out_cleanup;
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB, false);
+	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+	if (r)
+		goto out_cleanup;
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fenceA)) {
+		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+	if (radeon_fence_signaled(fenceB)) {
+		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC, false);
+
+	for (i = 0; i < 30; ++i) {
+		mdelay(100);
+		sigA = radeon_fence_signaled(fenceA);
+		sigB = radeon_fence_signaled(fenceB);
+		if (sigA || sigB)
+			break;
+	}
+
+	if (!sigA && !sigB) {
+		DRM_ERROR("Neither fence A nor B has been signaled\n");
+		goto out_cleanup;
+	} else if (sigA && sigB) {
+		DRM_ERROR("Both fence A and B has been signaled\n");
+		goto out_cleanup;
+	}
+
+	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC, false);
+
+	mdelay(1000);
+
+	r = radeon_fence_wait(fenceA, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence A\n");
+		goto out_cleanup;
+	}
+	r = radeon_fence_wait(fenceB, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence B\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fenceA)
+		radeon_fence_unref(&fenceA);
+
+	if (fenceB)
+		radeon_fence_unref(&fenceB);
+
+	if (r)
+		pr_warn("Error while testing ring sync (%d)\n", r);
+}
+
+static bool radeon_test_sync_possible(struct radeon_ring *ringA,
+				      struct radeon_ring *ringB)
+{
+	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
+	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
+		return false;
+
+	return true;
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+	int i, j, k;
+
+	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ringA = &rdev->ring[i];
+		if (!ringA->ready)
+			continue;
+
+		for (j = 0; j < i; ++j) {
+			struct radeon_ring *ringB = &rdev->ring[j];
+			if (!ringB->ready)
+				continue;
+
+			if (!radeon_test_sync_possible(ringA, ringB))
+				continue;
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+			radeon_test_ring_sync(rdev, ringA, ringB);
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+			radeon_test_ring_sync(rdev, ringB, ringA);
+
+			for (k = 0; k < j; ++k) {
+				struct radeon_ring *ringC = &rdev->ring[k];
+				if (!ringC->ready)
+					continue;
+
+				if (!radeon_test_sync_possible(ringA, ringC))
+					continue;
+
+				if (!radeon_test_sync_possible(ringB, ringC))
+					continue;
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+			}
+		}
+	}
+}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 0000000..bc26efd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+	    TP_PROTO(struct radeon_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct radeon_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+TRACE_EVENT(radeon_cs,
+	    TP_PROTO(struct radeon_cs_parser *p),
+	    TP_ARGS(p),
+	    TP_STRUCT__entry(
+			     __field(u32, ring)
+			     __field(u32, dw)
+			     __field(u32, fences)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = p->ring;
+			   __entry->dw = p->chunk_ib->length_dw;
+			   __entry->fences = radeon_fence_count_emitted(
+				p->rdev, p->ring);
+			   ),
+	    TP_printk("ring=%u, dw=%u, fences=%u",
+		      __entry->ring, __entry->dw,
+		      __entry->fences)
+);
+
+TRACE_EVENT(radeon_vm_grab_id,
+	    TP_PROTO(unsigned vmid, int ring),
+	    TP_ARGS(vmid, ring),
+	    TP_STRUCT__entry(
+			     __field(u32, vmid)
+			     __field(u32, ring)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->vmid = vmid;
+			   __entry->ring = ring;
+			   ),
+	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+);
+
+TRACE_EVENT(radeon_vm_bo_update,
+	    TP_PROTO(struct radeon_bo_va *bo_va),
+	    TP_ARGS(bo_va),
+	    TP_STRUCT__entry(
+			     __field(u64, soffset)
+			     __field(u64, eoffset)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->soffset = bo_va->it.start;
+			   __entry->eoffset = bo_va->it.last + 1;
+			   __entry->flags = bo_va->flags;
+			   ),
+	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+		      __entry->soffset, __entry->eoffset, __entry->flags)
+);
+
+TRACE_EVENT(radeon_vm_set_page,
+	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+		     uint32_t incr, uint32_t flags),
+	    TP_ARGS(pe, addr, count, incr, flags),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, addr)
+			     __field(u32, count)
+			     __field(u32, incr)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->addr = addr;
+			   __entry->count = count;
+			   __entry->incr = incr;
+			   __entry->flags = flags;
+			   ),
+	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+		      __entry->pe, __entry->addr, __entry->incr,
+		      __entry->flags, __entry->count)
+);
+
+TRACE_EVENT(radeon_vm_flush,
+	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+	    TP_ARGS(pd_addr, ring, id),
+	    TP_STRUCT__entry(
+			     __field(u64, pd_addr)
+			     __field(u32, ring)
+			     __field(u32, id)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pd_addr = pd_addr;
+			   __entry->ring = ring;
+			   __entry->id = id;
+			   ),
+	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+		      __entry->pd_addr, __entry->ring, __entry->id)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(int, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->ring = ring;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, ring=%d, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem),
+
+	    TP_STRUCT__entry(
+			     __field(int, ring)
+			     __field(signed, waiters)
+			     __field(uint64_t, gpu_addr)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = ring;
+			   __entry->waiters = sem->waiters;
+			   __entry->gpu_addr = sem->gpu_addr;
+			   ),
+
+	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+		      __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/radeon
+#include <trace/define_trace.h>
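+
+/*
+ * Editor's note (illustration, not part of the original header): each
+ * TRACE_EVENT above expands into a trace_<name>() helper that the
+ * driver calls at the matching spot, e.g.:
+ *
+ *	trace_radeon_vm_flush(pd_addr, ring, id);
+ */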
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 0000000..66b3d50
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 0000000..cbb67e9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,1175 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
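+/* BO mmap offsets handed to userspace start at the 4GB mark, keeping them
+ * clear of the legacy DRM map offsets below it.
+ */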
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
+
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+	struct radeon_mman *mman;
+	struct radeon_device *rdev;
+
+	mman = container_of(bdev, struct radeon_mman, bdev);
+	rdev = container_of(mman, struct radeon_device, mman);
+	return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	rdev->mman.mem_global_referenced = false;
+	global_ref = &rdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &radeon_ttm_mem_global_init;
+	global_ref->release = &radeon_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	rdev->mman.bo_global_ref.mem_glob =
+		rdev->mman.mem_global_ref.object;
+	global_ref = &rdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		return r;
+	}
+
+	rdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+	if (rdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		rdev->mman.mem_global_referenced = false;
+	}
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
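+/* Describe the memory domains TTM may place buffers in: cacheable system
+ * pages, the GTT aperture (or the AGP aperture when AGP is active) and
+ * on-board VRAM.
+ */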
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				struct ttm_mem_type_manager *man)
+{
+	struct radeon_device *rdev;
+
+	rdev = radeon_get_rdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.gtt_start;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+#if IS_ENABLED(CONFIG_AGP)
+		if (rdev->flags & RADEON_IS_AGP) {
+			if (!rdev->ddev->agp) {
+				DRM_ERROR("AGP is not enabled for memory type %u\n",
+					  (unsigned)type);
+				return -EINVAL;
+			}
+			if (!rdev->ddev->agp->cant_use_aperture)
+				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+						 TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.vram_start;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	static const struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+	};
+
+	struct radeon_bo *rbo;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		if (!rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready) {
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+		} else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
+			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			int i;
+
+			/* Try evicting to the CPU inaccessible part of VRAM
+			 * first, but only set GTT as busy placement, so this
+			 * BO will be evicted to GTT rather than causing other
+			 * BOs to be evicted from VRAM
+			 */
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
+							 RADEON_GEM_DOMAIN_GTT);
+			rbo->placement.num_busy_placement = 0;
+			for (i = 0; i < rbo->placement.num_placement; i++) {
+				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+					if (rbo->placements[i].fpfn < fpfn)
+						rbo->placements[i].fpfn = fpfn;
+				} else {
+					rbo->placement.busy_placement =
+						&rbo->placements[i];
+					rbo->placement.num_busy_placement = 1;
+				}
+			}
+		} else {
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		}
+		break;
+	case TTM_PL_TT:
+	default:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+	}
+	*placement = rbo->placement;
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
+	if (radeon_ttm_tt_has_userptr(bo->ttm))
+		return -EPERM;
+	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+					  filp->private_data);
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+			bool evict, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem,
+			struct ttm_mem_reg *old_mem)
+{
+	struct radeon_device *rdev;
+	uint64_t old_start, new_start;
+	struct radeon_fence *fence;
+	unsigned num_pages;
+	int r, ridx;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	ridx = radeon_copy_ring_index(rdev);
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
+
+	switch (old_mem->mem_type) {
+	case TTM_PL_VRAM:
+		old_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		old_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		new_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		new_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
+		return -EINVAL;
+	}
+	if (!rdev->ring[ridx].ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
+		return -EINVAL;
+	}
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
+	radeon_fence_unref(&fence);
+	return r;
+}
+
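+/* VRAM -> system moves are done in two steps: blit into a temporary GTT
+ * placement with the GPU, then let ttm_bo_move_ttm() finish the move to
+ * system memory.
+ */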
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_place placements;
+	struct ttm_placement placement;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	if (unlikely(r)) {
+		return r;
+	}
+
+	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+
+	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
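+/* The reverse bounce: bind the pages into a temporary GTT placement first
+ * so the GPU can see them, then blit from there into the final VRAM
+ * placement.
+ */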
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_placement placement;
+	struct ttm_place placements;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	if (unlikely(r)) {
+		return r;
+	}
+	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
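+/* Top-level move callback: SYSTEM <-> TT transitions only need a (re)bind,
+ * everything else is blitted by the copy ring when one is ready, with a
+ * CPU memcpy as the fallback.
+ */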
+static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
+			  struct ttm_operation_ctx *ctx,
+			  struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct radeon_bo *rbo;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int r;
+
+	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+	if (r)
+		return r;
+
+	/* Can't move a pinned BO */
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	if (WARN_ON_ONCE(rbo->pin_count > 0))
+		return -EINVAL;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if ((old_mem->mem_type == TTM_PL_TT &&
+	     new_mem->mem_type == TTM_PL_SYSTEM) ||
+	    (old_mem->mem_type == TTM_PL_SYSTEM &&
+	     new_mem->mem_type == TTM_PL_TT)) {
+		/* bind is enough */
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
+	    rdev->asic->copy.copy == NULL) {
+		/* use memcpy */
+		goto memcpy;
+	}
+
+	if (old_mem->mem_type == TTM_PL_VRAM &&
+	    new_mem->mem_type == TTM_PL_SYSTEM) {
+		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
+					ctx->no_wait_gpu, new_mem);
+	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+		   new_mem->mem_type == TTM_PL_VRAM) {
+		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
+					    ctx->no_wait_gpu, new_mem);
+	} else {
+		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+				     new_mem, old_mem);
+	}
+
+	if (r) {
+memcpy:
+		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
+		if (r) {
+			return r;
+		}
+	}
+
+	/* update statistics */
+	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+	return 0;
+}
+
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if IS_ENABLED(CONFIG_AGP)
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+#ifdef __alpha__
+		/*
+		 * Alpha: use bus.addr to hold the ioremap() return,
+		 * so we can modify bus.base below.
+		 */
+		if (mem->placement & TTM_PL_FLAG_WC)
+			mem->bus.addr =
+				ioremap_wc(mem->bus.base + mem->bus.offset,
+					   mem->bus.size);
+		else
+			mem->bus.addr =
+				ioremap_nocache(mem->bus.base + mem->bus.offset,
+						mem->bus.size);
+		if (!mem->bus.addr)
+			return -ENOMEM;
+
+		/*
+		 * Alpha: Use just the bus offset plus
+		 * the hose/domain memory base for bus.base.
+		 * It then can be used to build PTEs for VRAM
+		 * access, as done in ttm_bo_vm_fault().
+		 */
+		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+			rdev->ddev->hose->dense_mem_base;
+#endif
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct radeon_device		*rdev;
+	u64				offset;
+
+	uint64_t			userptr;
+	struct mm_struct		*usermm;
+	uint32_t			userflags;
+};
+
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned pinned = 0, nents;
+	int r;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (current->mm != gtt->usermm)
+		return -EPERM;
+
+	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+		/* check that we only pin down anonymous memory
+		 * to prevent problems with writeback
+		 */
+		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+		struct vm_area_struct *vma;
+		vma = find_vma(gtt->usermm, gtt->userptr);
+		if (!vma || vma->vm_file || vma->vm_end < end)
+			return -EPERM;
+	}
+
+	do {
+		unsigned num_pages = ttm->num_pages - pinned;
+		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+		struct page **pages = ttm->pages + pinned;
+
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
+		if (r < 0)
+			goto release_pages;
+
+		pinned += r;
+
+	} while (pinned < ttm->num_pages);
+
+	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+				      ttm->num_pages << PAGE_SHIFT,
+				      GFP_KERNEL);
+	if (r)
+		goto release_sg;
+
+	r = -ENOMEM;
+	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+	if (nents != ttm->sg->nents)
+		goto release_sg;
+
+	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+					 gtt->ttm.dma_address, ttm->num_pages);
+
+	return 0;
+
+release_sg:
+	kfree(ttm->sg);
+
+release_pages:
+	release_pages(ttm->pages, pinned);
+	return r;
+}
+
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct sg_page_iter sg_iter;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	/* double check that we don't free the table twice */
+	if (!ttm->sg->sgl)
+		return;
+
+	/* free the sg table and pages again */
+	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		put_page(page);
+	}
+
+	sg_free_table(ttm->sg);
+}
+
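+/* Bind the already populated pages into the GART at the BO's offset.
+ * Userptr pages are pinned here and mapped without GPU write access.
+ */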
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+		RADEON_GART_PAGE_WRITE;
+	int r;
+
+	if (gtt->userptr) {
+		r = radeon_ttm_tt_pin_userptr(ttm);
+		if (r)
+			return r;
+		flags &= ~RADEON_GART_PAGE_WRITE;
+	}
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mem_reg %p, ttm %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	if (ttm->caching_state == tt_cached)
+		flags |= RADEON_GART_PAGE_SNOOP;
+	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+			     ttm->pages, gtt->ttm.dma_address, flags);
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+	if (gtt->userptr)
+		radeon_ttm_tt_unpin_userptr(ttm);
+
+	return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+	.bind = &radeon_ttm_backend_bind,
+	.unbind = &radeon_ttm_backend_unbind,
+	.destroy = &radeon_ttm_backend_destroy,
+};
+
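+/* Allocate the per-BO TTM backend: AGP systems reuse the generic AGP
+ * backend, everything else gets a radeon_ttm_tt with DMA address storage.
+ */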
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
+					   uint32_t page_flags)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt;
+
+	rdev = radeon_get_rdev(bo->bdev);
+#if IS_ENABLED(CONFIG_AGP)
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
+					 page_flags);
+	}
+#endif
+
+	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &radeon_backend_func;
+	gtt->rdev = rdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
+{
+	if (!ttm || ttm->func != &radeon_backend_func)
+		return NULL;
+	return (struct radeon_ttm_tt *)ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
+{
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+	struct radeon_device *rdev;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (gtt && gtt->userptr) {
+		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+		if (!ttm->sg)
+			return -ENOMEM;
+
+		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if IS_ENABLED(CONFIG_AGP)
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_populate(ttm, ctx);
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
+		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
+	}
+#endif
+
+	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (gtt && gtt->userptr) {
+		kfree(ttm->sg);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		return;
+	}
+
+	if (slave)
+		return;
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if IS_ENABLED(CONFIG_AGP)
+	if (rdev->flags & RADEON_IS_AGP) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+		return;
+	}
+#endif
+
+	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
+}
+
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags)
+{
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+
+	if (gtt == NULL)
+		return -EINVAL;
+
+	gtt->userptr = addr;
+	gtt->usermm = current->mm;
+	gtt->userflags = flags;
+	return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+
+	if (gtt == NULL)
+		return false;
+
+	return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+
+	if (gtt == NULL)
+		return false;
+
+	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+	.ttm_tt_create = &radeon_ttm_tt_create,
+	.ttm_tt_populate = &radeon_ttm_tt_populate,
+	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+	.invalidate_caches = &radeon_invalidate_caches,
+	.init_mem_type = &radeon_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = &radeon_evict_flags,
+	.move = &radeon_bo_move,
+	.verify_access = &radeon_verify_access,
+	.move_notify = &radeon_bo_move_notify,
+	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
+};
+
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+	int r;
+
+	r = radeon_ttm_global_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&rdev->mman.bdev,
+			       rdev->mman.bo_global_ref.ref.object,
+			       &radeon_bo_driver,
+			       rdev->ddev->anon_inode->i_mapping,
+			       DRM_FILE_PAGE_OFFSET,
+			       rdev->need_dma32);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
+		return r;
+	}
+	rdev->mman.initialized = true;
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+				rdev->mc.real_vram_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	/* Change the size here instead of the init above so only lpfn is affected */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     NULL, &rdev->stolen_vga_memory);
+	if (r) {
+		return r;
+	}
+	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stolen_vga_memory);
+	if (r) {
+		radeon_bo_unref(&rdev->stolen_vga_memory);
+		return r;
+	}
+	DRM_INFO("radeon: %uM of VRAM memory ready\n",
+		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+				rdev->mc.gtt_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing GTT heap.\n");
+		return r;
+	}
+	DRM_INFO("radeon: %uM of GTT memory ready.\n",
+		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
+
+	r = radeon_ttm_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->mman.initialized)
+		return;
+	radeon_ttm_debugfs_fini(rdev);
+	if (rdev->stolen_vga_memory) {
+		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stolen_vga_memory);
+			radeon_bo_unreserve(rdev->stolen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stolen_vga_memory);
+	}
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+	ttm_bo_device_release(&rdev->mman.bdev);
+	radeon_gart_fini(rdev);
+	radeon_ttm_global_fini(rdev);
+	rdev->mman.initialized = false;
+	DRM_INFO("radeon: ttm finalized\n");
+}
+
+/* This should only be called at bootup or when userspace
+ * isn't running.
+ */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+	struct ttm_mem_type_manager *man;
+
+	if (!rdev->mman.initialized)
+		return;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
+	man->size = size >> PAGE_SHIFT;
+}
+
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
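+/* Wrap TTM's fault handler so that page faults take pm.mclk_lock for
+ * read and therefore cannot race with memory reclocking.
+ */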
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct radeon_device *rdev;
+	vm_fault_t ret;
+
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
+	if (bo == NULL) {
+		return VM_FAULT_NOPAGE;
+	}
+	rdev = radeon_get_rdev(bo->bdev);
+	down_read(&rdev->pm.mclk_lock);
+	ret = ttm_vm_ops->fault(vmf);
+	up_read(&rdev->pm.mclk_lock);
+	return ret;
+}
+
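+/* On the first mapping, snapshot TTM's vm_ops and substitute our fault
+ * handler; every radeon VMA then uses the patched ops table.
+ */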
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct radeon_device *rdev;
+	int r;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		return -EINVAL;
+	}
+
+	file_priv = filp->private_data;
+	rdev = file_priv->minor->dev->dev_private;
+	if (rdev == NULL) {
+		return -EINVAL;
+	}
+	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		radeon_ttm_vm_ops = *ttm_vm_ops;
+		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+	}
+	vma->vm_ops = &radeon_ttm_vm_ops;
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	unsigned ttm_pl = *(int*)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	man->func->debug(man, &p);
+	return 0;
+}
+
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list radeon_ttm_debugfs_list[] = {
+	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
+	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
+	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
+#endif
+};
+
+static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
+{
+	struct radeon_device *rdev = inode->i_private;
+	i_size_write(inode, rdev->mc.mc_vram_size);
+	filep->private_data = inode->i_private;
+	return 0;
+}
+
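+/* Read VRAM 32 bits at a time through the MM_INDEX/MM_DATA indirect
+ * register window; on Evergreen and newer the upper address bits go into
+ * EVERGREEN_MM_INDEX_HI.
+ */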
+static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
+				    size_t size, loff_t *pos)
+{
+	struct radeon_device *rdev = f->private_data;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		unsigned long flags;
+		uint32_t value;
+
+		if (*pos >= rdev->mc.mc_vram_size)
+			return result;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
+		if (rdev->family >= CHIP_CEDAR)
+			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
+		value = RREG32(RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static const struct file_operations radeon_ttm_vram_fops = {
+	.owner = THIS_MODULE,
+	.open = radeon_ttm_vram_open,
+	.read = radeon_ttm_vram_read,
+	.llseek = default_llseek
+};
+
+static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
+{
+	struct radeon_device *rdev = inode->i_private;
+	i_size_write(inode, rdev->mc.gtt_size);
+	filep->private_data = inode->i_private;
+	return 0;
+}
+
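+/* Dump GTT contents through the CPU-side gart.pages[] array; slots with
+ * no page attached read back as zeroes.
+ */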
+static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
+				   size_t size, loff_t *pos)
+{
+	struct radeon_device *rdev = f->private_data;
+	ssize_t result = 0;
+	int r;
+
+	while (size) {
+		loff_t p = *pos / PAGE_SIZE;
+		unsigned off = *pos & ~PAGE_MASK;
+		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
+		struct page *page;
+		void *ptr;
+
+		if (p >= rdev->gart.num_cpu_pages)
+			return result;
+
+		page = rdev->gart.pages[p];
+		if (page) {
+			ptr = kmap(page);
+			ptr += off;
+
+			r = copy_to_user(buf, ptr, cur_size);
+			kunmap(rdev->gart.pages[p]);
+		} else {
+			r = clear_user(buf, cur_size);
+		}
+
+		if (r)
+			return -EFAULT;
+
+		result += cur_size;
+		buf += cur_size;
+		*pos += cur_size;
+		size -= cur_size;
+	}
+
+	return result;
+}
+
+static const struct file_operations radeon_ttm_gtt_fops = {
+	.owner = THIS_MODULE,
+	.open = radeon_ttm_gtt_open,
+	.read = radeon_ttm_gtt_read,
+	.llseek = default_llseek
+};
+
+#endif
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned count;
+
+	struct drm_minor *minor = rdev->ddev->primary;
+	struct dentry *ent, *root = minor->debugfs_root;
+
+	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
+				  rdev, &radeon_ttm_vram_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	rdev->mman.vram = ent;
+
+	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
+				  rdev, &radeon_ttm_gtt_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	rdev->mman.gtt = ent;
+
+	count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+
+#ifdef CONFIG_SWIOTLB
+	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
+		--count;
+#endif
+
+	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
+#else
+
+	return 0;
+#endif
+}
+
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+	debugfs_remove(rdev->mman.vram);
+	rdev->mman.vram = NULL;
+
+	debugfs_remove(rdev->mman.gtt);
+	rdev->mman.gtt = NULL;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
new file mode 100644
index 0000000..6beec68
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+
+static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+		  le32_to_cpu(hdr->ucode_array_offset_bytes));
+	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("MC\n");
+	radeon_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct mc_firmware_header_v1_0 *mc_hdr =
+			container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("io_debug_size_bytes: %u\n",
+			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
+		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+	} else {
+		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("SMC\n");
+	radeon_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct smc_firmware_header_v1_0 *smc_hdr =
+			container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+	} else {
+		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("GFX\n");
+	radeon_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct gfx_firmware_header_v1_0 *gfx_hdr =
+			container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(gfx_hdr->ucode_feature_version));
+		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+	} else {
+		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("RLC\n");
+	radeon_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct rlc_firmware_header_v1_0 *rlc_hdr =
+			container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(rlc_hdr->ucode_feature_version));
+		DRM_DEBUG("save_and_restore_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
+		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+		DRM_DEBUG("master_pkt_description_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+	} else {
+		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("SDMA\n");
+	radeon_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct sdma_firmware_header_v1_0 *sdma_hdr =
+			container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(sdma_hdr->ucode_feature_version));
+		DRM_DEBUG("ucode_change_version: %u\n",
+			  le32_to_cpu(sdma_hdr->ucode_change_version));
+		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+	} else {
+		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+			  version_major, version_minor);
+	}
+}
+
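+/* A firmware image with a common header is considered valid when the size
+ * recorded in the header matches the size of the loaded blob.
+ */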
+int radeon_ucode_validate(const struct firmware *fw)
+{
+	const struct common_firmware_header *hdr =
+		(const struct common_firmware_header *)fw->data;
+
+	if (fw->size == le32_to_cpu(hdr->size_bytes))
+		return 0;
+
+	return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
new file mode 100644
index 0000000..dc4576e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RADEON_UCODE_H__
+#define __RADEON_UCODE_H__
+
+/* CP */
+#define R600_PFP_UCODE_SIZE          576
+#define R600_PM4_UCODE_SIZE          1792
+#define R700_PFP_UCODE_SIZE          848
+#define R700_PM4_UCODE_SIZE          1360
+#define EVERGREEN_PFP_UCODE_SIZE     1120
+#define EVERGREEN_PM4_UCODE_SIZE     1376
+#define CAYMAN_PFP_UCODE_SIZE        2176
+#define CAYMAN_PM4_UCODE_SIZE        2176
+#define SI_PFP_UCODE_SIZE            2144
+#define SI_PM4_UCODE_SIZE            2144
+#define SI_CE_UCODE_SIZE             2144
+#define CIK_PFP_UCODE_SIZE           2144
+#define CIK_ME_UCODE_SIZE            2144
+#define CIK_CE_UCODE_SIZE            2144
+
+/* MEC */
+#define CIK_MEC_UCODE_SIZE           4192
+
+/* RLC */
+#define R600_RLC_UCODE_SIZE          768
+#define R700_RLC_UCODE_SIZE          1024
+#define EVERGREEN_RLC_UCODE_SIZE     768
+#define CAYMAN_RLC_UCODE_SIZE        1024
+#define ARUBA_RLC_UCODE_SIZE         1536
+#define SI_RLC_UCODE_SIZE            2048
+#define BONAIRE_RLC_UCODE_SIZE       2048
+#define KB_RLC_UCODE_SIZE            2560
+#define KV_RLC_UCODE_SIZE            2560
+#define ML_RLC_UCODE_SIZE            2560
+
+/* MC */
+#define BTC_MC_UCODE_SIZE            6024
+#define CAYMAN_MC_UCODE_SIZE         6037
+#define SI_MC_UCODE_SIZE             7769
+#define TAHITI_MC_UCODE_SIZE         7808
+#define PITCAIRN_MC_UCODE_SIZE       7775
+#define VERDE_MC_UCODE_SIZE          7875
+#define OLAND_MC_UCODE_SIZE          7863
+#define BONAIRE_MC_UCODE_SIZE        7866
+#define BONAIRE_MC2_UCODE_SIZE       7948
+#define HAWAII_MC_UCODE_SIZE         7933
+#define HAWAII_MC2_UCODE_SIZE        8091
+
+/* SDMA */
+#define CIK_SDMA_UCODE_SIZE          1050
+#define CIK_SDMA_UCODE_VERSION       64
+
+/* SMC */
+#define RV770_SMC_UCODE_START        0x0100
+#define RV770_SMC_UCODE_SIZE         0x410d
+#define RV770_SMC_INT_VECTOR_START   0xffc0
+#define RV770_SMC_INT_VECTOR_SIZE    0x0040
+
+#define RV730_SMC_UCODE_START        0x0100
+#define RV730_SMC_UCODE_SIZE         0x412c
+#define RV730_SMC_INT_VECTOR_START   0xffc0
+#define RV730_SMC_INT_VECTOR_SIZE    0x0040
+
+#define RV710_SMC_UCODE_START        0x0100
+#define RV710_SMC_UCODE_SIZE         0x3f1f
+#define RV710_SMC_INT_VECTOR_START   0xffc0
+#define RV710_SMC_INT_VECTOR_SIZE    0x0040
+
+#define RV740_SMC_UCODE_START        0x0100
+#define RV740_SMC_UCODE_SIZE         0x41c5
+#define RV740_SMC_INT_VECTOR_START   0xffc0
+#define RV740_SMC_INT_VECTOR_SIZE    0x0040
+
+#define CEDAR_SMC_UCODE_START        0x0100
+#define CEDAR_SMC_UCODE_SIZE         0x5d50
+#define CEDAR_SMC_INT_VECTOR_START   0xffc0
+#define CEDAR_SMC_INT_VECTOR_SIZE    0x0040
+
+#define REDWOOD_SMC_UCODE_START      0x0100
+#define REDWOOD_SMC_UCODE_SIZE       0x5f0a
+#define REDWOOD_SMC_INT_VECTOR_START 0xffc0
+#define REDWOOD_SMC_INT_VECTOR_SIZE  0x0040
+
+#define JUNIPER_SMC_UCODE_START      0x0100
+#define JUNIPER_SMC_UCODE_SIZE       0x5f1f
+#define JUNIPER_SMC_INT_VECTOR_START 0xffc0
+#define JUNIPER_SMC_INT_VECTOR_SIZE  0x0040
+
+#define CYPRESS_SMC_UCODE_START      0x0100
+#define CYPRESS_SMC_UCODE_SIZE       0x61f7
+#define CYPRESS_SMC_INT_VECTOR_START 0xffc0
+#define CYPRESS_SMC_INT_VECTOR_SIZE  0x0040
+
+#define BARTS_SMC_UCODE_START        0x0100
+#define BARTS_SMC_UCODE_SIZE         0x6107
+#define BARTS_SMC_INT_VECTOR_START   0xffc0
+#define BARTS_SMC_INT_VECTOR_SIZE    0x0040
+
+#define TURKS_SMC_UCODE_START        0x0100
+#define TURKS_SMC_UCODE_SIZE         0x605b
+#define TURKS_SMC_INT_VECTOR_START   0xffc0
+#define TURKS_SMC_INT_VECTOR_SIZE    0x0040
+
+#define CAICOS_SMC_UCODE_START       0x0100
+#define CAICOS_SMC_UCODE_SIZE        0x5fbd
+#define CAICOS_SMC_INT_VECTOR_START  0xffc0
+#define CAICOS_SMC_INT_VECTOR_SIZE   0x0040
+
+#define CAYMAN_SMC_UCODE_START       0x0100
+#define CAYMAN_SMC_UCODE_SIZE        0x79ec
+#define CAYMAN_SMC_INT_VECTOR_START  0xffc0
+#define CAYMAN_SMC_INT_VECTOR_SIZE   0x0040
+
+#define TAHITI_SMC_UCODE_START       0x10000
+#define TAHITI_SMC_UCODE_SIZE        0xf458
+
+#define PITCAIRN_SMC_UCODE_START     0x10000
+#define PITCAIRN_SMC_UCODE_SIZE      0xe9f4
+
+#define VERDE_SMC_UCODE_START        0x10000
+#define VERDE_SMC_UCODE_SIZE         0xebe4
+
+#define OLAND_SMC_UCODE_START        0x10000
+#define OLAND_SMC_UCODE_SIZE         0xe7b4
+
+#define HAINAN_SMC_UCODE_START       0x10000
+#define HAINAN_SMC_UCODE_SIZE        0xe67C
+
+#define BONAIRE_SMC_UCODE_START      0x20000
+#define BONAIRE_SMC_UCODE_SIZE       0x1FDEC
+
+#define HAWAII_SMC_UCODE_START       0x20000
+#define HAWAII_SMC_UCODE_SIZE        0x1FDEC
+
+struct common_firmware_header {
+	uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+	uint32_t header_size_bytes; /* size of just the header in bytes */
+	uint16_t header_version_major; /* header version */
+	uint16_t header_version_minor; /* header version */
+	uint16_t ip_version_major; /* IP version */
+	uint16_t ip_version_minor; /* IP version */
+	uint32_t ucode_version;
+	uint32_t ucode_size_bytes; /* size of ucode in bytes */
+	uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+	uint32_t crc32;  /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+	uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t jt_offset; /* jt location */
+	uint32_t jt_size;  /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t save_and_restore_offset;
+	uint32_t clear_state_descriptor_offset;
+	uint32_t avail_scratch_ram_locations;
+	uint32_t master_pkt_description_offset;
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t ucode_change_version;
+	uint32_t jt_offset; /* jt location */
+	uint32_t jt_size; /* size of jt */
+};
+
+/* header is fixed size */
+union radeon_firmware_header {
+	struct common_firmware_header common;
+	struct mc_firmware_header_v1_0 mc;
+	struct smc_firmware_header_v1_0 smc;
+	struct gfx_firmware_header_v1_0 gfx;
+	struct rlc_firmware_header_v1_0 rlc;
+	struct sdma_firmware_header_v1_0 sdma;
+	uint8_t raw[0x100];
+};
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int radeon_ucode_validate(const struct firmware *fw);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 0000000..95f4db7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "radeon_ucode.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS	1000
+
+/* Firmware Names */
+#define FIRMWARE_R600		"radeon/R600_uvd.bin"
+#define FIRMWARE_RS780		"radeon/RS780_uvd.bin"
+#define FIRMWARE_RV770		"radeon/RV770_uvd.bin"
+#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
+#define FIRMWARE_BONAIRE_LEGACY	"radeon/BONAIRE_uvd.bin"
+#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R600);
+MODULE_FIRMWARE(FIRMWARE_RS780);
+MODULE_FIRMWARE(FIRMWARE_RV770);
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+MODULE_FIRMWARE(FIRMWARE_BONAIRE_LEGACY);
+MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
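+/* Pick the firmware image for the ASIC family, preferring the newer
+ * header-carrying image where one exists, and allocate the VCPU BO that
+ * holds the firmware plus stack, heap and per-session state.
+ */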
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+	unsigned long bo_size;
+	const char *fw_name = NULL, *legacy_fw_name = NULL;
+	int i, r;
+
+	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+	case CHIP_RV620:
+	case CHIP_RV635:
+		legacy_fw_name = FIRMWARE_R600;
+		break;
+
+	case CHIP_RS780:
+	case CHIP_RS880:
+		legacy_fw_name = FIRMWARE_RS780;
+		break;
+
+	case CHIP_RV770:
+		legacy_fw_name = FIRMWARE_RV770;
+		break;
+
+	case CHIP_RV710:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		legacy_fw_name = FIRMWARE_RV710;
+		break;
+
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+		legacy_fw_name = FIRMWARE_CYPRESS;
+		break;
+
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_PALM:
+	case CHIP_CAYMAN:
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		legacy_fw_name = FIRMWARE_SUMO;
+		break;
+
+	case CHIP_TAHITI:
+	case CHIP_VERDE:
+	case CHIP_PITCAIRN:
+	case CHIP_ARUBA:
+	case CHIP_OLAND:
+		legacy_fw_name = FIRMWARE_TAHITI;
+		break;
+
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
+	case CHIP_MULLINS:
+		legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
+		fw_name = FIRMWARE_BONAIRE;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	rdev->uvd.fw_header_present = false;
+	rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
+	if (fw_name) {
+		/* Let's try to load the newer firmware first */
+		r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
+		if (r) {
+			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+				fw_name);
+		} else {
+			struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
+			unsigned version_major, version_minor, family_id;
+
+			r = radeon_ucode_validate(rdev->uvd_fw);
+			if (r)
+				return r;
+
+			rdev->uvd.fw_header_present = true;
+
+			family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+			version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+			version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+			DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+				 version_major, version_minor, family_id);
+
+			/*
+			 * Limit the number of UVD handles depending on
+			 * microcode major and minor versions.
+			 */
+			if ((version_major >= 0x01) && (version_minor >= 0x37))
+				rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
+		}
+	}
+
+	/*
+	 * In case there is only legacy firmware, or we encounter an error
+	 * while loading the new firmware, we fall back to loading the legacy
+	 * firmware now.
+	 */
+	if (!fw_name || r) {
+		r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
+		if (r) {
+			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+				legacy_fw_name);
+			return r;
+		}
+	}
+
+	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
+		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
+	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     NULL, &rdev->uvd.vcpu_bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+	if (r) {
+		radeon_bo_unref(&rdev->uvd.vcpu_bo);
+		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->uvd.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+		radeon_bo_unref(&rdev->uvd.vcpu_bo);
+		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+		return r;
+	}
+
+	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+		return r;
+	}
+
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
+		atomic_set(&rdev->uvd.handles[i], 0);
+		rdev->uvd.filp[i] = NULL;
+		rdev->uvd.img_size[i] = 0;
+	}
+
+	return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return;
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+	if (!r) {
+		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	}
+
+	radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
+	release_firmware(rdev->uvd_fw);
+}
+
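+/* Tear down any sessions still open at suspend time by submitting destroy
+ * messages and waiting for them, so the handles are free again on resume.
+ */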
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+	int i, r;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0) {
+			struct radeon_fence *fence;
+
+			radeon_uvd_note_usage(rdev);
+
+			r = radeon_uvd_get_destroy_msg(rdev,
+				R600_RING_TYPE_UVD_INDEX, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
+
+			radeon_fence_wait(fence, false);
+			radeon_fence_unref(&fence);
+
+			rdev->uvd.filp[i] = NULL;
+			atomic_set(&rdev->uvd.handles[i], 0);
+		}
+	}
+
+	return 0;
+}
+
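+/* The VCPU BO is re-initialized on resume: upload the firmware image again
+ * and zero the stack/heap/session area behind it.
+ */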
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+	unsigned size;
+	void *ptr;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return -EINVAL;
+
+	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
+	memset(ptr, 0, size);
+
+	return 0;
+}
+
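+/* UVD can only access the first 256MB of its address space, so clamp all
+ * placements to that window; when GTT is allowed as well, offer the next
+ * 256MB segment as an additional placement.
+ */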
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains)
+{
+	int i;
+
+	for (i = 0; i < rbo->placement.num_placement; ++i) {
+		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	}
+
+	/* If it must be in VRAM it must be in the first segment as well */
+	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+		return;
+
+	/* abort if we already have more than one placement */
+	if (rbo->placement.num_placement > 1)
+		return;
+
+	/* add another 256MB segment */
+	rbo->placements[1] = rbo->placements[0];
+	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placement.num_placement++;
+	rbo->placement.num_busy_placement++;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+	int i, r;
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0 && rdev->uvd.filp[i] == filp) {
+			struct radeon_fence *fence;
+
+			radeon_uvd_note_usage(rdev);
+
+			r = radeon_uvd_get_destroy_msg(rdev,
+				R600_RING_TYPE_UVD_INDEX, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
+
+			radeon_fence_wait(fence, false);
+			radeon_fence_unref(&fence);
+
+			rdev->uvd.filp[i] = NULL;
+			atomic_set(&rdev->uvd.handles[i], 0);
+		}
+	}
+}
+
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+	unsigned stream_type = msg[4];
+	unsigned width = msg[6];
+	unsigned height = msg[7];
+	unsigned dpb_size = msg[9];
+	unsigned pitch = msg[28];
+
+	unsigned width_in_mb = width / 16;
+	unsigned height_in_mb = ALIGN(height / 16, 2);
+
+	unsigned image_size, tmp, min_dpb_size;
+
+	image_size = width * height;
+	image_size += image_size / 2;
+	image_size = ALIGN(image_size, 1024);
+
+	switch (stream_type) {
+	case 0: /* H264 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 17;
+
+		/* macroblock context buffer */
+		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * height_in_mb * 32;
+		break;
+
+	case 1: /* VC1 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CONTEXT_BUFFER */
+		min_dpb_size += width_in_mb * height_in_mb * 128;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * 64;
+
+		/* DB surface buffer */
+		min_dpb_size += width_in_mb * 128;
+
+		/* BP */
+		tmp = max(width_in_mb, height_in_mb);
+		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+		break;
+
+	case 3: /* MPEG2 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+		break;
+
+	case 4: /* MPEG4 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CM */
+		min_dpb_size += width_in_mb * height_in_mb * 64;
+
+		/* IT surface buffer */
+		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+		break;
+
+	default:
+		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+		return -EINVAL;
+	}
+
+	if (width > pitch) {
+		DRM_ERROR("Invalid UVD decoding target pitch!\n");
+		return -EINVAL;
+	}
+
+	if (dpb_size < min_dpb_size) {
+		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+			  dpb_size, min_dpb_size);
+		return -EINVAL;
+	}
+
+	buf_sizes[0x1] = dpb_size;
+	buf_sizes[0x2] = image_size;
+	return 0;
+}
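+
+/*
+ * Worked example for the H264 case above, assuming a hypothetical
+ * 1920x1088 stream: width_in_mb = 120, height_in_mb = ALIGN(68, 2) = 68,
+ * image_size = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440 bytes, so
+ * min_dpb_size = 3133440 * 17 + 120 * 68 * 17 * 192 + 120 * 68 * 32
+ *              = 80163840 bytes (~76 MiB).
+ */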
+
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
+				     unsigned stream_type)
+{
+	switch (stream_type) {
+	case 0: /* H264 */
+	case 1: /* VC1 */
+		/* always supported */
+		return 0;
+
+	case 3: /* MPEG2 */
+	case 4: /* MPEG4 */
+		/* only since UVD 3 */
+		if (p->rdev->family >= CHIP_PALM)
+			return 0;
+
+		/* fall through */
+	default:
+		DRM_ERROR("UVD codec not supported by hardware %d!\n",
+			  stream_type);
+		return -EINVAL;
+	}
+}
+
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+			     unsigned offset, unsigned buf_sizes[])
+{
+	int32_t *msg, msg_type, handle;
+	unsigned img_size = 0;
+	struct dma_fence *f;
+	void *ptr;
+
+	int i, r;
+
+	if (offset & 0x3F) {
+		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		return -EINVAL;
+	}
+
+	f = reservation_object_get_excl(bo->tbo.resv);
+	if (f) {
+		r = radeon_fence_wait((struct radeon_fence *)f, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
+	r = radeon_bo_kmap(bo, &ptr);
+	if (r) {
+		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		return r;
+	}
+
+	msg = ptr + offset;
+
+	msg_type = msg[1];
+	handle = msg[2];
+
+	if (handle == 0) {
+		DRM_ERROR("Invalid UVD handle!\n");
+		/* don't leak the kmap taken above */
+		radeon_bo_kunmap(bo);
+		return -EINVAL;
+	}
+
+	switch (msg_type) {
+	case 0:
+		/* it's a create msg, calc image size (width * height) */
+		img_size = msg[7] * msg[8];
+
+		r = radeon_uvd_validate_codec(p, msg[4]);
+		radeon_bo_kunmap(bo);
+		if (r)
+			return r;
+
+		/* try to alloc a new handle */
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
+			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+				return -EINVAL;
+			}
+
+			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+				p->rdev->uvd.filp[i] = p->filp;
+				p->rdev->uvd.img_size[i] = img_size;
+				return 0;
+			}
+		}
+
+		DRM_ERROR("No more free UVD handles!\n");
+		return -EINVAL;
+
+	case 1:
+		/* it's a decode msg, validate codec and calc buffer sizes */
+		r = radeon_uvd_validate_codec(p, msg[4]);
+		if (!r)
+			r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+		radeon_bo_kunmap(bo);
+		if (r)
+			return r;
+
+		/* validate the handle */
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
+			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+				if (p->rdev->uvd.filp[i] != p->filp) {
+					DRM_ERROR("UVD handle collision detected!\n");
+					return -EINVAL;
+				}
+				return 0;
+			}
+		}
+
+		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		return -ENOENT;
+
+	case 2:
+		/* it's a destroy msg, free the handle */
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i)
+			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+		radeon_bo_kunmap(bo);
+		return 0;
+
+	default:
+		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		/* don't leak the kmap taken above */
+		radeon_bo_kunmap(bo);
+		return -EINVAL;
+	}
+
+	BUG();
+	return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+			       int data0, int data1,
+			       unsigned buf_sizes[], bool *has_msg_cmd)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_bo_list *reloc;
+	unsigned idx, cmd, offset;
+	uint64_t start, end;
+	int r;
+
+	relocs_chunk = p->chunk_relocs;
+	offset = radeon_get_ib_value(p, data0);
+	idx = radeon_get_ib_value(p, data1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+
+	reloc = &p->relocs[(idx / 4)];
+	start = reloc->gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
+
+	p->ib.ptr[data0] = start & 0xFFFFFFFF;
+	p->ib.ptr[data1] = start >> 32;
+
+	cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+	if (cmd < 0x4) {
+		if (end <= start) {
+			DRM_ERROR("invalid reloc offset %X!\n", offset);
+			return -EINVAL;
+		}
+		if ((end - start) < buf_sizes[cmd]) {
+			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+				  (unsigned)(end - start), buf_sizes[cmd]);
+			return -EINVAL;
+		}
+
+	} else if (cmd != 0x100) {
+		DRM_ERROR("invalid UVD command %X!\n", cmd);
+		return -EINVAL;
+	}
+
+	if ((start >> 28) != ((end - 1) >> 28)) {
+		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+			  start, end);
+		return -EINVAL;
+	}
+
+	/* TODO: is this still necessary on NI+ ? */
+	if ((cmd == 0 || cmd == 0x3) &&
+	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+			  start, end);
+		return -EINVAL;
+	}
+
+	if (cmd == 0) {
+		if (*has_msg_cmd) {
+			DRM_ERROR("More than one message in a UVD-IB!\n");
+			return -EINVAL;
+		}
+		*has_msg_cmd = true;
+		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+		if (r)
+			return r;
+	} else if (!*has_msg_cmd) {
+		DRM_ERROR("Message needed before other commands are send!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int *data0, int *data1,
+			     unsigned buf_sizes[],
+			     bool *has_msg_cmd)
+{
+	int i, r;
+
+	p->idx++;
+	for (i = 0; i <= pkt->count; ++i) {
+		switch (pkt->reg + i*4) {
+		case UVD_GPCOM_VCPU_DATA0:
+			*data0 = p->idx;
+			break;
+		case UVD_GPCOM_VCPU_DATA1:
+			*data1 = p->idx;
+			break;
+		case UVD_GPCOM_VCPU_CMD:
+			r = radeon_uvd_cs_reloc(p, *data0, *data1,
+						buf_sizes, has_msg_cmd);
+			if (r)
+				return r;
+			break;
+		case UVD_ENGINE_CNTL:
+		case UVD_NO_OP:
+			break;
+		default:
+			DRM_ERROR("Invalid reg 0x%X!\n",
+				  pkt->reg + i*4);
+			return -EINVAL;
+		}
+		p->idx++;
+	}
+	return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	int r, data0 = 0, data1 = 0;
+
+	/* does the IB have a msg command? */
+	bool has_msg_cmd = false;
+
+	/* minimum buffer sizes */
+	unsigned buf_sizes[] = {
+		[0x00000000]	=	2048,
+		[0x00000001]	=	32 * 1024 * 1024,
+		[0x00000002]	=	2048 * 1152 * 3,
+		[0x00000003]	=	2048,
+	};
+
+	if (p->chunk_ib->length_dw % 16) {
+		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+			  p->chunk_ib->length_dw);
+		return -EINVAL;
+	}
+
+	if (p->chunk_relocs == NULL) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r)
+			return r;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+					      buf_sizes, &has_msg_cmd);
+			if (r)
+				return r;
+			break;
+		case RADEON_PACKET_TYPE2:
+			p->idx += pkt.count + 2;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunk_ib->length_dw);
+
+	if (!has_msg_cmd) {
+		DRM_ERROR("UVD-IBs need a msg command!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+			       int ring, uint64_t addr,
+			       struct radeon_fence **fence)
+{
+	struct radeon_ib ib;
+	int i, r;
+
+	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
+	if (r)
+		return r;
+
+	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+	ib.ptr[1] = addr;
+	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+	ib.ptr[3] = addr >> 32;
+	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+	ib.ptr[5] = 0;
+	for (i = 6; i < 16; i += 2) {
+		ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
+		ib.ptr[i+1] = 0;
+	}
+	ib.length_dw = 16;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+
+	if (fence)
+		*fence = radeon_fence_ref(ib.fence);
+
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/*
+ * Multiple fence commands without any stream commands in between can
+ * crash the vcpu, so just try to emit a dummy create/destroy msg to
+ * avoid this.
+ */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence)
+{
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
+
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
+
+	int r, i;
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
+		return r;
+
+	/* stitch together a UVD create msg */
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
+	for (i = 11; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence)
+{
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
+
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
+
+	int r, i;
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
+		return r;
+
+	/* stitch together a UVD destroy msg */
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	for (i = 4; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
+}
+
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power management
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+				     unsigned *sd, unsigned *hd)
+{
+	unsigned i;
+
+	*sd = 0;
+	*hd = 0;
+
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
+		if (!atomic_read(&rdev->uvd.handles[i]))
+			continue;
+
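+		/* 720 * 576 pixels (PAL SD) is the cut-off here: e.g. a
+		 * 1280x720 stream (921600 pixels) counts as HD, while
+		 * 720x480 (345600 pixels) counts as SD */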
+		if (rdev->uvd.img_size[i] >= 720*576)
+			++(*hd);
+		else
+			++(*sd);
+	}
+}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device, uvd.idle_work.work);
+
+	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+						 &rdev->pm.dpm.hd);
+			radeon_dpm_enable_uvd(rdev, false);
+		} else {
+			radeon_set_uvd_clocks(rdev, 0, 0);
+		}
+	} else {
+		schedule_delayed_work(&rdev->uvd.idle_work,
+				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	}
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+	bool streams_changed = false;
+	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		unsigned hd = 0, sd = 0;
+		radeon_uvd_count_handles(rdev, &sd, &hd);
+		if ((rdev->pm.dpm.sd != sd) ||
+		    (rdev->pm.dpm.hd != hd)) {
+			rdev->pm.dpm.sd = sd;
+			rdev->pm.dpm.hd = hd;
+			/* disable this for now */
+			/*streams_changed = true;*/
+		}
+	}
+
+	if (set_clocks || streams_changed) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			radeon_dpm_enable_uvd(rdev, true);
+		} else {
+			radeon_set_uvd_clocks(rdev, 53300, 40000);
+		}
+	}
+}
+
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+					      unsigned target_freq,
+					      unsigned pd_min,
+					      unsigned pd_even)
+{
+	unsigned post_div = vco_freq / target_freq;
+
+	/* adjust to post divider minimum value */
+	if (post_div < pd_min)
+		post_div = pd_min;
+
+	/* we always need a frequency less than or equal to the target */
+	if ((vco_freq / post_div) > target_freq)
+		post_div += 1;
+
+	/* post dividers above a certain value must be even */
+	if (post_div > pd_even && post_div % 2)
+		post_div += 1;
+
+	return post_div;
+}
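+
+/*
+ * Example with hypothetical values: vco_freq = 160000, target_freq = 53300,
+ * pd_min = 2, pd_even = 3. The initial guess is 160000 / 53300 = 3, but
+ * 160000 / 3 = 53333 would exceed the target, so the post divider is bumped
+ * to 4, which also satisfies the even requirement for values above pd_even.
+ */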
+
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+				  unsigned vclk, unsigned dclk,
+				  unsigned vco_min, unsigned vco_max,
+				  unsigned fb_factor, unsigned fb_mask,
+				  unsigned pd_min, unsigned pd_max,
+				  unsigned pd_even,
+				  unsigned *optimal_fb_div,
+				  unsigned *optimal_vclk_div,
+				  unsigned *optimal_dclk_div)
+{
+	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+	/* start off with something large */
+	unsigned optimal_score = ~0;
+
+	/* loop through vco from low to high */
+	vco_min = max(max(vco_min, vclk), dclk);
+	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+		unsigned vclk_div, dclk_div, score;
+
+		do_div(fb_div, ref_freq);
+
+		/* fb div out of range? */
+		if (fb_div > fb_mask)
+			break; /* it can only get worse */
+
+		fb_div &= fb_mask;
+
+		/* calc vclk divider with current vco freq */
+		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+							 pd_min, pd_even);
+		if (vclk_div > pd_max)
+			break; /* vco is too big, we have to stop */
+
+		/* calc dclk divider with current vco freq */
+		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+							 pd_min, pd_even);
+		if (dclk_div > pd_max)
+			break; /* vco is too big, we have to stop */
+
+		/* calc score with current vco freq */
+		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+		/* determine if this vco setting is better than current optimal settings */
+		if (score < optimal_score) {
+			*optimal_fb_div = fb_div;
+			*optimal_vclk_div = vclk_div;
+			*optimal_dclk_div = dclk_div;
+			optimal_score = score;
+			if (optimal_score == 0)
+				break; /* it can't get better than this */
+		}
+	}
+
+	/* did we find a valid setup? */
+	if (optimal_score == ~0)
+		return -EINVAL;
+
+	return 0;
+}
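+
+/*
+ * Usage sketch (the limit values below are illustrative; real callers take
+ * them from the asic specific code):
+ *
+ *	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ *					  16, 0x03FFFFFF, 0, 128, 5,
+ *					  &fb_div, &vclk_div, &dclk_div);
+ *
+ * The resulting feedback and post dividers are then programmed into the
+ * UPLL registers.
+ */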
+
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+				unsigned cg_upll_func_cntl)
+{
+	unsigned i;
+
+	/* make sure UPLL_CTLREQ is deasserted */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* assert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < 100; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* deassert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == 100) {
+		DRM_ERROR("Timeout setting UVD clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
new file mode 100644
index 0000000..c1c619f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+/* 1 second timeout */
+#define VCE_IDLE_TIMEOUT_MS	1000
+
+/* Firmware Names */
+#define FIRMWARE_TAHITI	"radeon/TAHITI_vce.bin"
+#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"
+
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+
+static void radeon_vce_idle_work_handler(struct work_struct *work);
+
+/**
+ * radeon_vce_init - allocate memory, load vce firmware
+ *
+ * @rdev: radeon_device pointer
+ *
+ * First step to get VCE online, allocate memory and load the firmware
+ */
+int radeon_vce_init(struct radeon_device *rdev)
+{
+	static const char *fw_version = "[ATI LIB=VCEFW,";
+	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
+	unsigned long size;
+	const char *fw_name, *c;
+	uint8_t start, mid, end;
+	int i, r;
+
+	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_ARUBA:
+		fw_name = FIRMWARE_TAHITI;
+		break;
+
+	case CHIP_BONAIRE:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_HAWAII:
+	case CHIP_MULLINS:
+		fw_name = FIRMWARE_BONAIRE;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
+	if (r) {
+		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+			fw_name);
+		return r;
+	}
+
+	/* search for firmware version */
+
+	size = rdev->vce_fw->size - strlen(fw_version) - 9;
+	c = rdev->vce_fw->data;
+	for (;size > 0; --size, ++c)
+		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
+			break;
+
+	if (size == 0)
+		return -EINVAL;
+
+	c += strlen(fw_version);
+	if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
+		return -EINVAL;
+
+	/* search for feedback version */
+
+	size = rdev->vce_fw->size - strlen(fb_version) - 3;
+	c = rdev->vce_fw->data;
+	for (;size > 0; --size, ++c)
+		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
+			break;
+
+	if (size == 0)
+		return -EINVAL;
+
+	c += strlen(fb_version);
+	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
+		return -EINVAL;
+
+	DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
+		 start, mid, end, rdev->vce.fb_version);
+
+	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
+
+	/* we can only work with this fw version for now */
+	if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
+	    (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
+	    (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
+		return -EINVAL;
+
+	/* allocate firmware, stack and heap BO */
+
+	if (rdev->family < CHIP_BONAIRE)
+		size = vce_v1_0_bo_size(rdev);
+	else
+		size = vce_v2_0_bo_size(rdev);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
+			     &rdev->vce.vcpu_bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
+	if (r) {
+		radeon_bo_unref(&rdev->vce.vcpu_bo);
+		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->vce.gpu_addr);
+	radeon_bo_unreserve(rdev->vce.vcpu_bo);
+	if (r) {
+		radeon_bo_unref(&rdev->vce.vcpu_bo);
+		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
+		return r;
+	}
+
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		atomic_set(&rdev->vce.handles[i], 0);
+		rdev->vce.filp[i] = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vce_fini - free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Last step on VCE teardown, free firmware memory
+ */
+void radeon_vce_fini(struct radeon_device *rdev)
+{
+	if (rdev->vce.vcpu_bo == NULL)
+		return;
+
+	radeon_bo_unref(&rdev->vce.vcpu_bo);
+
+	release_firmware(rdev->vce_fw);
+}
+
+/**
+ * radeon_vce_suspend - unpin VCE fw memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ */
+int radeon_vce_suspend(struct radeon_device *rdev)
+{
+	int i;
+
+	if (rdev->vce.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+		if (atomic_read(&rdev->vce.handles[i]))
+			break;
+
+	if (i == RADEON_MAX_VCE_HANDLES)
+		return 0;
+
+	/* TODO: suspending running encoding sessions isn't supported */
+	return -EINVAL;
+}
+
+/**
+ * radeon_vce_resume - pin VCE fw memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ */
+int radeon_vce_resume(struct radeon_device *rdev)
+{
+	void *cpu_addr;
+	int r;
+
+	if (rdev->vce.vcpu_bo == NULL)
+		return -EINVAL;
+
+	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vce.vcpu_bo);
+		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
+		return r;
+	}
+
+	memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo));
+	if (rdev->family < CHIP_BONAIRE)
+		r = vce_v1_0_load_fw(rdev, cpu_addr);
+	else
+		memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
+
+	radeon_bo_kunmap(rdev->vce.vcpu_bo);
+
+	radeon_bo_unreserve(rdev->vce.vcpu_bo);
+
+	return r;
+}
+
+/**
+ * radeon_vce_idle_work_handler - power off VCE
+ *
+ * @work: pointer to work structure
+ *
+ * Power off VCE when it's not used anymore
+ */
+static void radeon_vce_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device, vce.idle_work.work);
+
+	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
+	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			radeon_dpm_enable_vce(rdev, false);
+		} else {
+			radeon_set_vce_clocks(rdev, 0, 0);
+		}
+	} else {
+		schedule_delayed_work(&rdev->vce.idle_work,
+				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+	}
+}
+
+/**
+ * radeon_vce_note_usage - power up VCE
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Make sure VCE is powered up when we want to use it
+ */
+void radeon_vce_note_usage(struct radeon_device *rdev)
+{
+	bool streams_changed = false;
+	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
+	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
+					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		/* XXX figure out if the streams changed */
+		streams_changed = false;
+	}
+
+	if (set_clocks || streams_changed) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			radeon_dpm_enable_vce(rdev, true);
+		} else {
+			radeon_set_vce_clocks(rdev, 53300, 40000);
+		}
+	}
+}
+
+/**
+ * radeon_vce_free_handles - free still open VCE handles
+ *
+ * @rdev: radeon_device pointer
+ * @filp: drm file pointer
+ *
+ * Close all VCE handles still open by this file pointer
+ */
+void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+	int i, r;
+
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&rdev->vce.handles[i]);
+		if (!handle || rdev->vce.filp[i] != filp)
+			continue;
+
+		radeon_vce_note_usage(rdev);
+
+		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
+					       handle, NULL);
+		if (r)
+			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
+
+		rdev->vce.filp[i] = NULL;
+		atomic_set(&rdev->vce.handles[i], 0);
+	}
+}
+
+/**
+ * radeon_vce_get_create_msg - generate a VCE create msg
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence)
+{
+	const unsigned ib_size_dw = 1024;
+	struct radeon_ib ib;
+	uint64_t dummy;
+	int i, r;
+
+	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	dummy = ib.gpu_addr + 1024;
+
+	/* stitch together a VCE create msg */
+	ib.length_dw = 0;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
+
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
+
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
+
+	for (i = ib.length_dw; i < ib_size_dw; ++i)
+		ib.ptr[i] = cpu_to_le32(0x0);
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+	}
+
+	if (fence)
+		*fence = radeon_fence_ref(ib.fence);
+
+	radeon_ib_free(rdev, &ib);
+
+	return r;
+}
+
+/**
+ * radeon_vce_get_destroy_msg - generate a VCE destroy msg
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence)
+{
+	const unsigned ib_size_dw = 1024;
+	struct radeon_ib ib;
+	uint64_t dummy;
+	int i, r;
+
+	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	dummy = ib.gpu_addr + 1024;
+
+	/* stitch together a VCE destroy msg */
+	ib.length_dw = 0;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
+
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
+
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
+
+	for (i = ib.length_dw; i < ib_size_dw; ++i)
+		ib.ptr[i] = cpu_to_le32(0x0);
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+	}
+
+	if (fence)
+		*fence = radeon_fence_ref(ib.fence);
+
+	radeon_ib_free(rdev, &ib);
+
+	return r;
+}
+
+/**
+ * radeon_vce_cs_reloc - command submission relocation
+ *
+ * @p: parser context
+ * @lo: address of lower dword
+ * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
+ *
+ * Patch relocation inside command stream with real buffer address
+ */
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+			unsigned size)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_bo_list *reloc;
+	uint64_t start, end, offset;
+	unsigned idx;
+
+	relocs_chunk = p->chunk_relocs;
+	offset = radeon_get_ib_value(p, lo);
+	idx = radeon_get_ib_value(p, hi);
+
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+
+	reloc = &p->relocs[(idx / 4)];
+	start = reloc->gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
+
+	p->ib.ptr[lo] = start & 0xFFFFFFFF;
+	p->ib.ptr[hi] = start >> 32;
+
+	if (end <= start) {
+		DRM_ERROR("invalid reloc offset %llX!\n", offset);
+		return -EINVAL;
+	}
+	if ((end - start) < size) {
+		DRM_ERROR("buffer to small (%d / %d)!\n",
+			(unsigned)(end - start), size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ * @allocated: allocated a new handle?
+ *
+ * Validates the handle and returns the found session index or -EINVAL
+ * if we don't have another free session index.
+ */
+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
+				      uint32_t handle, bool *allocated)
+{
+	unsigned i;
+
+	*allocated = false;
+
+	/* validate the handle */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
+			if (p->rdev->vce.filp[i] != p->filp) {
+				DRM_ERROR("VCE handle collision detected!\n");
+				return -EINVAL;
+			}
+			return i;
+		}
+	}
+
+	/* handle not found, try to alloc a new one */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+			p->rdev->vce.filp[i] = p->filp;
+			p->rdev->vce.img_size[i] = 0;
+			*allocated = true;
+			return i;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+	return -EINVAL;
+}
+
+/**
+ * radeon_vce_cs_parse - parse and validate the command stream
+ *
+ * @p: parser context
+ *
+ */
+int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+{
+	int session_idx = -1;
+	bool destroyed = false, created = false, allocated = false;
+	uint32_t tmp, handle = 0;
+	uint32_t *size = &tmp;
+	int i, r = 0;
+
+	while (p->idx < p->chunk_ib->length_dw) {
+		uint32_t len = radeon_get_ib_value(p, p->idx);
+		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			r = -EINVAL;
+			goto out;
+		}
+
+		if (destroyed) {
+			DRM_ERROR("No other command allowed after destroy!\n");
+			r = -EINVAL;
+			goto out;
+		}
+
+		switch (cmd) {
+		case 0x00000001: // session
+			handle = radeon_get_ib_value(p, p->idx + 2);
+			session_idx = radeon_vce_validate_handle(p, handle,
+								 &allocated);
+			if (session_idx < 0)
+				return session_idx;
+			size = &p->rdev->vce.img_size[session_idx];
+			break;
+
+		case 0x00000002: // task info
+			break;
+
+		case 0x01000001: // create
+			created = true;
+			if (!allocated) {
+				DRM_ERROR("Handle already in use!\n");
+				r = -EINVAL;
+				goto out;
+			}
+
+			*size = radeon_get_ib_value(p, p->idx + 8) *
+				radeon_get_ib_value(p, p->idx + 10) *
+				8 * 3 / 2;
+			break;
+
+		case 0x04000001: // config extension
+		case 0x04000002: // pic control
+		case 0x04000005: // rate control
+		case 0x04000007: // motion estimation
+		case 0x04000008: // rdo
+		case 0x04000009: // vui
+			break;
+
+		case 0x03000001: // encode
+			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+						*size);
+			if (r)
+				goto out;
+
+			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+						*size / 3);
+			if (r)
+				goto out;
+			break;
+
+		case 0x02000001: // destroy
+			destroyed = true;
+			break;
+
+		case 0x05000001: // context buffer
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						*size * 2);
+			if (r)
+				goto out;
+			break;
+
+		case 0x05000004: // video bitstream buffer
+			tmp = radeon_get_ib_value(p, p->idx + 4);
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						tmp);
+			if (r)
+				goto out;
+			break;
+
+		case 0x05000005: // feedback buffer
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						4096);
+			if (r)
+				goto out;
+			break;
+
+		default:
+			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
+			r = -EINVAL;
+			goto out;
+		}
+
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			r = -EINVAL;
+			goto out;
+		}
+
+		p->idx += len / 4;
+	}
+
+	if (allocated && !created) {
+		DRM_ERROR("New session without create command!\n");
+		r = -ENOENT;
+	}
+
+out:
+	if ((!r && destroyed) || (r && allocated)) {
+		/*
+		 * IB contains a destroy msg or we have allocated a
+		 * handle and got an error; either way, free the handle
+		 */
+		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
+			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
+	}
+
+	return r;
+}
+
+/**
+ * radeon_vce_semaphore_emit - emit a semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: engine to use
+ * @semaphore: address of semaphore
+ * @emit_wait: true=emit wait, false=emit signal
+ *
+ */
+bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
+	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
+	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
+	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
+	if (!emit_wait)
+		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
+
+	return true;
+}
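+
+/*
+ * Note: the semaphore address is 8 byte aligned and split into two 20 bit
+ * fields above, bits [22:3] and [42:23] of the 64 bit GPU address.
+ */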
+
+/**
+ * radeon_vce_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: the IB to execute
+ *
+ */
+void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
+	radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
+	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
+	radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
+}
+
+/**
+ * radeon_vce_fence_emit - add a fence command to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: the fence
+ *
+ */
+void radeon_vce_fence_emit(struct radeon_device *rdev,
+			   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
+	radeon_ring_write(ring, cpu_to_le32(addr));
+	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
+	radeon_ring_write(ring, cpu_to_le32(fence->seq));
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
+}
+
+/**
+ * radeon_vce_ring_test - test if VCE ring is working
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the engine to test on
+ *
+ */
+int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
+	unsigned i;
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 16);
+	if (r) {
+		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
+			  ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed\n",
+			 ring->idx);
+		r = -ETIMEDOUT;
+	}
+
+	return r;
+}
+
+/**
+ * radeon_vce_ib_test - test if VCE IBs are working
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the engine to test on
+ *
+ */
+int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_fence *fence = NULL;
+	int r;
+
+	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
+	if (r) {
+		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+	} else {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
+	}
+error:
+	radeon_fence_unref(&fence);
+	return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
new file mode 100644
index 0000000..7f1a9c7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -0,0 +1,1268 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics; however,
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
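+
+/*
+ * Sizing example (illustrative, assuming 4 KiB GPU pages and
+ * radeon_vm_block_size == 9): each page table then covers 512 pages, i.e.
+ * 2 MiB of address space, and a VM with max_pfn == 262144 (1 GiB) needs
+ * 262144 >> 9 == 512 page directory entries, i.e. a 4 KiB page directory
+ * (512 entries * 8 bytes, see the helpers below).
+ */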
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->vm_manager.enabled) {
+		r = radeon_asic_vm_init(rdev);
+		if (r)
+			return r;
+
+		rdev->vm_manager.enabled = true;
+	}
+	return 0;
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->vm_manager.enabled)
+		return;
+
+	for (i = 0; i < RADEON_NUM_VM; ++i)
+		radeon_fence_unref(&rdev->vm_manager.active[i]);
+	radeon_asic_vm_fini(rdev);
+	rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_get_bos - add the vm BOs to a validation list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm providing the BOs
+ * @head: head of validation list
+ *
+ * Add the page directory to the list of BOs to
+ * validate for command submission (cayman+).
+ */
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
+					  struct radeon_vm *vm,
+					  struct list_head *head)
+{
+	struct radeon_bo_list *list;
+	unsigned i, idx;
+
+	list = kvmalloc_array(vm->max_pde_used + 2,
+			     sizeof(struct radeon_bo_list), GFP_KERNEL);
+	if (!list)
+		return NULL;
+
+	/* add the vm page table to the list */
+	list[0].robj = vm->page_directory;
+	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
+	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
+	list[0].tv.bo = &vm->page_directory->tbo;
+	list[0].tv.shared = true;
+	list[0].tiling_flags = 0;
+	list_add(&list[0].tv.head, head);
+
+	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+		if (!vm->page_tables[i].bo)
+			continue;
+
+		list[idx].robj = vm->page_tables[i].bo;
+		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
+		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
+		list[idx].tv.bo = &list[idx].robj->tbo;
+		list[idx].tv.shared = true;
+		list[idx].tiling_flags = 0;
+		list_add(&list[idx++].tv.head, head);
+	}
+
+	return list;
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring)
+{
+	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+	struct radeon_vm_id *vm_id = &vm->ids[ring];
+
+	unsigned choices[2] = {};
+	unsigned i;
+
+	/* check if the id is still valid */
+	if (vm_id->id && vm_id->last_id_use &&
+	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
+		return NULL;
+
+	/* we definitely need to flush */
+	vm_id->pd_gpu_addr = ~0ll;
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+		struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+		if (fence == NULL) {
+			/* found a free one */
+			vm_id->id = i;
+			trace_radeon_vm_grab_id(i, ring);
+			return NULL;
+		}
+
+		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+			best[fence->ring] = fence;
+			choices[fence->ring == ring ? 0 : 1] = i;
+		}
+	}
+
+	for (i = 0; i < 2; ++i) {
+		if (choices[i]) {
+			vm_id->id = choices[i];
+			trace_radeon_vm_grab_id(choices[i], ring);
+			return rdev->vm_manager.active[choices[i]];
+		}
+	}
+
+	/* should never happen */
+	BUG();
+	return NULL;
+}
+
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ * @updates: last vm update that is waited for
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring, struct radeon_fence *updates)
+{
+	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+	struct radeon_vm_id *vm_id = &vm->ids[ring];
+
+	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
+	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
+
+		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
+		radeon_fence_unref(&vm_id->flushed_updates);
+		vm_id->flushed_updates = radeon_fence_ref(updates);
+		vm_id->pd_gpu_addr = pd_addr;
+		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
+				     vm_id->id, vm_id->pd_gpu_addr);
+
+	}
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence)
+{
+	unsigned vm_id = vm->ids[fence->ring].id;
+
+	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
+	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
+	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+	if (bo_va == NULL) {
+		return NULL;
+	}
+	bo_va->vm = vm;
+	bo_va->bo = bo;
+	bo_va->it.start = 0;
+	bo_va->it.last = 0;
+	bo_va->flags = 0;
+	bo_va->ref_count = 1;
+	INIT_LIST_HEAD(&bo_va->bo_list);
+	INIT_LIST_HEAD(&bo_va->vm_status);
+
+	mutex_lock(&vm->mutex);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	mutex_unlock(&vm->mutex);
+
+	return bo_va;
+}
+
+/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe,
+				uint64_t addr, unsigned count,
+				uint32_t incr, uint32_t flags)
+{
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+					   count, incr, flags);
+
+	} else {
+		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+					 count, incr, flags);
+	}
+}
+
+/**
+ * radeon_vm_clear_bo - initially clear the page dir/table
+ *
+ * @rdev: radeon_device pointer
+ * @bo: bo to clear
+ */
+static int radeon_vm_clear_bo(struct radeon_device *rdev,
+			      struct radeon_bo *bo)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct radeon_ib ib;
+	unsigned entries;
+	uint64_t addr;
+	int r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (r)
+		return r;
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (r)
+		goto error_unreserve;
+
+	addr = radeon_bo_gpu_offset(bo);
+	entries = radeon_bo_size(bo) / 8;
+
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
+	if (r)
+		goto error_unreserve;
+
+	ib.length_dw = 0;
+
+	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_asic_vm_pad_ib(rdev, &ib);
+	WARN_ON(ib.length_dw > 64);
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r)
+		goto error_free;
+
+	ib.fence->is_vm_update = true;
+	radeon_bo_fence(bo, ib.fence, false);
+
+error_free:
+	radeon_ib_free(rdev, &ib);
+
+error_unreserve:
+	radeon_bo_unreserve(bo);
+	return r;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved and gets unreserved by this function!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t soffset,
+			  uint32_t flags)
+{
+	uint64_t size = radeon_bo_size(bo_va->bo);
+	struct radeon_vm *vm = bo_va->vm;
+	unsigned last_pfn, pt_idx;
+	uint64_t eoffset;
+	int r;
+
+	if (soffset) {
+		/* make sure object fit at this offset */
+		eoffset = soffset + size - 1;
+		if (soffset >= eoffset) {
+			r = -EINVAL;
+			goto error_unreserve;
+		}
+
+		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+		if (last_pfn >= rdev->vm_manager.max_pfn) {
+			dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+				last_pfn, rdev->vm_manager.max_pfn);
+			r = -EINVAL;
+			goto error_unreserve;
+		}
+
+	} else {
+		eoffset = last_pfn = 0;
+	}
+
+	mutex_lock(&vm->mutex);
+	soffset /= RADEON_GPU_PAGE_SIZE;
+	eoffset /= RADEON_GPU_PAGE_SIZE;
+	if (soffset || eoffset) {
+		struct interval_tree_node *it;
+		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
+		if (it && it != &bo_va->it) {
+			struct radeon_bo_va *tmp;
+			tmp = container_of(it, struct radeon_bo_va, it);
+			/* bo and tmp overlap, invalid offset */
+			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+				soffset, tmp->bo, tmp->it.start, tmp->it.last);
+			mutex_unlock(&vm->mutex);
+			r = -EINVAL;
+			goto error_unreserve;
+		}
+	}
+
+	if (bo_va->it.start || bo_va->it.last) {
+		/* add a clone of the bo_va to clear the old address */
+		struct radeon_bo_va *tmp;
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			r = -ENOMEM;
+			goto error_unreserve;
+		}
+		tmp->it.start = bo_va->it.start;
+		tmp->it.last = bo_va->it.last;
+		tmp->vm = vm;
+		tmp->bo = radeon_bo_ref(bo_va->bo);
+
+		interval_tree_remove(&bo_va->it, &vm->va);
+		spin_lock(&vm->status_lock);
+		bo_va->it.start = 0;
+		bo_va->it.last = 0;
+		list_del_init(&bo_va->vm_status);
+		list_add(&tmp->vm_status, &vm->freed);
+		spin_unlock(&vm->status_lock);
+	}
+
+	if (soffset || eoffset) {
+		spin_lock(&vm->status_lock);
+		bo_va->it.start = soffset;
+		bo_va->it.last = eoffset;
+		list_add(&bo_va->vm_status, &vm->cleared);
+		spin_unlock(&vm->status_lock);
+		interval_tree_insert(&bo_va->it, &vm->va);
+	}
+
+	bo_va->flags = flags;
+
+	soffset >>= radeon_vm_block_size;
+	eoffset >>= radeon_vm_block_size;
+
+	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
+
+	if (eoffset > vm->max_pde_used)
+		vm->max_pde_used = eoffset;
+
+	radeon_bo_unreserve(bo_va->bo);
+
+	/* walk over the address space and allocate the page tables */
+	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+		struct radeon_bo *pt;
+
+		if (vm->page_tables[pt_idx].bo)
+			continue;
+
+		/* drop mutex to allocate and clear page table */
+		mutex_unlock(&vm->mutex);
+
+		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, 0,
+				     NULL, NULL, &pt);
+		if (r)
+			return r;
+
+		r = radeon_vm_clear_bo(rdev, pt);
+		if (r) {
+			radeon_bo_unref(&pt);
+			return r;
+		}
+
+		/* acquire mutex again */
+		mutex_lock(&vm->mutex);
+		if (vm->page_tables[pt_idx].bo) {
+			/* someone else allocated the pt in the meantime */
+			mutex_unlock(&vm->mutex);
+			radeon_bo_unref(&pt);
+			mutex_lock(&vm->mutex);
+			continue;
+		}
+
+		vm->page_tables[pt_idx].addr = 0;
+		vm->page_tables[pt_idx].bo = pt;
+	}
+
+	mutex_unlock(&vm->mutex);
+	return 0;
+
+error_unreserve:
+	radeon_bo_unreserve(bo_va->bo);
+	return r;
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+	uint64_t result;
+
+	/* page table offset */
+	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+	result &= ~RADEON_GPU_PAGE_MASK;
+
+	return result;
+}
+
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+	uint32_t hw_flags = 0;
+
+	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		hw_flags |= R600_PTE_SYSTEM;
+		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return hw_flags;
+}
+
+/**
+ * radeon_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+				    struct radeon_vm *vm)
+{
+	struct radeon_bo *pd = vm->page_directory;
+	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
+	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0, pt_idx, ndw;
+	struct radeon_ib ib;
+	int r;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	/* assume the worst case: one 6-dword set-pages command per PDE */
+	ndw += vm->max_pde_used * 6;
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+		uint64_t pde, pt;
+
+		if (bo == NULL)
+			continue;
+
+		pt = radeon_bo_gpu_offset(bo);
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+		vm->page_tables[pt_idx].addr = pt;
+
+		pde = pd_addr + pt_idx * 8;
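+		/* coalesce runs of contiguous PDEs pointing to contiguous page
+		 * tables into a single radeon_vm_set_pages call
+		 */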
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+			if (count) {
+				radeon_vm_set_pages(rdev, &ib, last_pde,
+						    last_pt, count, incr,
+						    R600_PTE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count)
+		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+				    incr, R600_PTE_VALID);
+
+	if (ib.length_dw != 0) {
+		radeon_asic_vm_pad_ib(rdev, &ib);
+
+		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+		WARN_ON(ib.length_dw > ndw);
+		r = radeon_ib_schedule(rdev, &ib, NULL, false);
+		if (r) {
+			radeon_ib_free(rdev, &ib);
+			return r;
+		}
+		ib.fence->is_vm_update = true;
+		radeon_bo_fence(pd, ib.fence, false);
+	}
+	radeon_ib_free(rdev, &ib);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB for the update
+ * @pe_start: first PTE to handle
+ * @pe_end: end of the PTEs to handle (exclusive)
+ * @addr: addr those PTEs should point to
+ * @flags: hw mapping flags
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_frag_ptes(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe_start, uint64_t pe_end,
+				uint64_t addr, uint32_t flags)
+{
+	/*
+	 * The MC L1 TLB supports variable sized pages, based on a fragment
+	 * field in the PTE. When this field is set to a non-zero value, page
+	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+	 * flags are considered valid for all PTEs within the fragment range
+	 * and corresponding mappings are assumed to be physically contiguous.
+	 *
+	 * The L1 TLB can store a single PTE for the whole fragment,
+	 * significantly increasing the space available for translation
+	 * caching. This leads to large improvements in throughput when the
+	 * TLB is under pressure.
+	 *
+	 * The L2 TLB distributes small and large fragments into two
+	 * asymmetric partitions. The large fragment cache is significantly
+	 * larger. Thus, we try to use large fragments wherever possible.
+	 * Userspace can support this by aligning virtual base address and
+	 * allocation size to the fragment size.
+	 */
+
+	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
+	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ?
+			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
+	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
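+	/* the alignment is in bytes of PTEs: 0x80 bytes = 16 entries = 64KB
+	 * of address space, 0x200 bytes = 64 entries = 256KB
+	 */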
+
+	uint64_t frag_start = ALIGN(pe_start, frag_align);
+	uint64_t frag_end = pe_end & ~(frag_align - 1);
+
+	unsigned count;
+
+	/* system pages are not contiguous */
+	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
+		count = (pe_end - pe_start) / 8;
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
+		return;
+	}
+
+	/* handle the 4K area at the beginning */
+	if (pe_start != frag_start) {
+		count = (frag_start - pe_start) / 8;
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
+		addr += RADEON_GPU_PAGE_SIZE * count;
+	}
+
+	/* handle the area in the middle */
+	count = (frag_end - frag_start) / 8;
+	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+
+	/* handle the 4K area at the end */
+	if (frag_end != pe_end) {
+		addr += RADEON_GPU_PAGE_SIZE * count;
+		count = (pe_end - frag_end) / 8;
+		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+				    RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: IB for the update
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 struct radeon_ib *ib,
+				 uint64_t start, uint64_t end,
+				 uint64_t dst, uint32_t flags)
+{
+	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> radeon_vm_block_size;
+		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
+		unsigned nptes;
+		uint64_t pte;
+		int r;
+
+		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+		r = reservation_object_reserve_shared(pt->tbo.resv);
+		if (r)
+			return r;
+
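+		/* update up to the end of the range or the end of this page
+		 * table, whichever comes first
+		 */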
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_bo_gpu_offset(pt);
+		pte += (addr & mask) * 8;
+
+		if ((last_pte + 8 * count) != pte) {
+			if (count) {
+				radeon_vm_frag_ptes(rdev, ib, last_pte,
+						    last_pte + 8 * count,
+						    last_dst, flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_vm_frag_ptes(rdev, ib, last_pte,
+				    last_pte + 8 * count,
+				    last_dst, flags);
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_fence_pts(struct radeon_vm *vm,
+				uint64_t start, uint64_t end,
+				struct radeon_fence *fence)
+{
+	unsigned i;
+
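+	/* convert the address range into an inclusive range of PT indices */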
+	start >>= radeon_vm_block_size;
+	end = (end - 1) >> radeon_vm_block_size;
+
+	for (i = start; i <= end; ++i)
+		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
+}
+
+/**
+ * radeon_vm_bo_update - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo_va (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved and mutex must be locked!
+ */
+int radeon_vm_bo_update(struct radeon_device *rdev,
+			struct radeon_bo_va *bo_va,
+			struct ttm_mem_reg *mem)
+{
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_ib ib;
+	unsigned nptes, ncmds, ndw;
+	uint64_t addr;
+	uint32_t flags;
+	int r;
+
+	if (!bo_va->it.start) {
+		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+			bo_va->bo, vm);
+		return -EINVAL;
+	}
+
+	spin_lock(&vm->status_lock);
+	if (mem) {
+		if (list_empty(&bo_va->vm_status)) {
+			spin_unlock(&vm->status_lock);
+			return 0;
+		}
+		list_del_init(&bo_va->vm_status);
+	} else {
+		list_del(&bo_va->vm_status);
+		list_add(&bo_va->vm_status, &vm->cleared);
+	}
+	spin_unlock(&vm->status_lock);
+
+	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
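+	/* BOs backed by read-only pages (e.g. a read-only userptr) must
+	 * never be mapped writeable
+	 */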
+	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
+
+	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			bo_va->flags |= RADEON_VM_PAGE_VALID;
+		}
+		if (mem->mem_type == TTM_PL_TT) {
+			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
+		} else {
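+			/* turn the VRAM offset into an address in the MC
+			 * address space
+			 */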
+			addr += rdev->vm_manager.vram_base_offset;
+		}
+	} else {
+		addr = 0;
+	}
+
+	trace_radeon_vm_bo_update(bo_va);
+
+	nptes = bo_va->it.last - bo_va->it.start + 1;
+
+	/* reserve space for one command every (1 << BLOCK_SIZE) entries
+	 * or 2k dwords (whichever is smaller)
+	 */
+	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	flags = radeon_vm_page_flags(bo_va->flags);
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		/* only copy commands needed */
+		ndw += ncmds * 7;
+
+	} else if (flags & R600_PTE_SYSTEM) {
+		/* header for write data commands */
+		ndw += ncmds * 4;
+
+		/* body of write data command */
+		ndw += nptes * 2;
+
+	} else {
+		/* set page commands needed */
+		ndw += ncmds * 10;
+
+		/* two extra commands for begin/end of fragment */
+		ndw += 2 * 10;
+	}
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
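+	/* when invalidating a mapping, wait for the last use of every VM id
+	 * so the PTEs aren't cleared while a job may still use them
+	 */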
+	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
+		unsigned i;
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
+	}
+
+	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+				  bo_va->it.last + 1, addr,
+				  radeon_vm_page_flags(bo_va->flags));
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}
+
+	radeon_asic_vm_pad_ib(rdev, &ib);
+	WARN_ON(ib.length_dw > ndw);
+
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}
+	ib.fence->is_vm_update = true;
+	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
+	radeon_fence_unref(&bo_va->last_pt_update);
+	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success, error for failure.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+	int r = 0;
+
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->freed)) {
+		bo_va = list_first_entry(&vm->freed,
+			struct radeon_bo_va, vm_status);
+		spin_unlock(&vm->status_lock);
+
+		r = radeon_vm_bo_update(rdev, bo_va, NULL);
+		radeon_bo_unref(&bo_va->bo);
+		radeon_fence_unref(&bo_va->last_pt_update);
+		spin_lock(&vm->status_lock);
+		list_del(&bo_va->vm_status);
+		kfree(bo_va);
+		if (r)
+			break;
+	}
+	spin_unlock(&vm->status_lock);
+	return r;
+}
+
+/**
+ * radeon_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success, error for failure.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+			     struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->invalidated)) {
+		bo_va = list_first_entry(&vm->invalidated,
+			struct radeon_bo_va, vm_status);
+		spin_unlock(&vm->status_lock);
+
+		r = radeon_vm_bo_update(rdev, bo_va, NULL);
+		if (r)
+			return r;
+
+		spin_lock(&vm->status_lock);
+	}
+	spin_unlock(&vm->status_lock);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ *
+ * Object has to be reserved!
+ */
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+		      struct radeon_bo_va *bo_va)
+{
+	struct radeon_vm *vm = bo_va->vm;
+
+	list_del(&bo_va->bo_list);
+
+	mutex_lock(&vm->mutex);
+	if (bo_va->it.start || bo_va->it.last)
+		interval_tree_remove(&bo_va->it, &vm->va);
+
+	spin_lock(&vm->status_lock);
+	list_del(&bo_va->vm_status);
+	if (bo_va->it.start || bo_va->it.last) {
+		bo_va->bo = radeon_bo_ref(bo_va->bo);
+		list_add(&bo_va->vm_status, &vm->freed);
+	} else {
+		radeon_fence_unref(&bo_va->last_pt_update);
+		kfree(bo_va);
+	}
+	spin_unlock(&vm->status_lock);
+
+	mutex_unlock(&vm->mutex);
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
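+	/* queue every mapping of this bo that has an address assigned and
+	 * isn't already on one of the status lists
+	 */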
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status) &&
+		    (bo_va->it.start || bo_va->it.last))
+			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+		spin_unlock(&bo_va->vm->status_lock);
+	}
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
+		RADEON_VM_PTE_COUNT * 8);
+	unsigned pd_size, pd_entries, pts_size;
+	int i, r;
+
+	vm->ib_bo_va = NULL;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		vm->ids[i].id = 0;
+		vm->ids[i].flushed_updates = NULL;
+		vm->ids[i].last_id_use = NULL;
+	}
+	mutex_init(&vm->mutex);
+	vm->va = RB_ROOT_CACHED;
+	spin_lock_init(&vm->status_lock);
+	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->freed);
+	INIT_LIST_HEAD(&vm->cleared);
+
+	pd_size = radeon_vm_directory_size(rdev);
+	pd_entries = radeon_vm_num_pdes(rdev);
+
+	/* allocate page table array */
+	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		return -ENOMEM;
+	}
+
+	r = radeon_bo_create(rdev, pd_size, align, true,
+			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     NULL, &vm->page_directory);
+	if (r)
+		return r;
+
+	r = radeon_vm_clear_bo(rdev, vm->page_directory);
+	if (r) {
+		radeon_bo_unref(&vm->page_directory);
+		vm->page_directory = NULL;
+		return r;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int i, r;
+
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+		dev_err(rdev->dev, "still active bo inside vm\n");
+	}
+	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
+					     &vm->va.rb_root, it.rb) {
+		interval_tree_remove(&bo_va->it, &vm->va);
+		r = radeon_bo_reserve(bo_va->bo, false);
+		if (!r) {
+			list_del_init(&bo_va->bo_list);
+			radeon_bo_unreserve(bo_va->bo);
+			radeon_fence_unref(&bo_va->last_pt_update);
+			kfree(bo_va);
+		}
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+		radeon_bo_unref(&bo_va->bo);
+		radeon_fence_unref(&bo_va->last_pt_update);
+		kfree(bo_va);
+	}
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_bo_unref(&vm->page_tables[i].bo);
+	kfree(vm->page_tables);
+
+	radeon_bo_unref(&vm->page_directory);
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		radeon_fence_unref(&vm->ids[i].flushed_updates);
+		radeon_fence_unref(&vm->ids[i].last_id_use);
+	}
+
+	mutex_destroy(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 0000000..d46b58d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,642 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B90 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 0000000..57745c8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,644 @@
+evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B90 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
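
The reg_srcs files in this patch share one plain-text format: the first line names the chip family and the highest register offset the list covers (for example "r100 0x3294" below), and every following line pairs a hexadecimal register offset with its mnemonic. The sketch below is a minimal stand-alone parser for that format, folding each listed offset into a bitmap with one bit per 32-bit register; the program structure, REG_SPACE bound, and helper names are assumptions for illustration, not necessarily how the driver consumes these lists.

	#include <stdio.h>

	#define REG_SPACE 0x40000	/* large enough for every offset listed here */

	/* one bit per dword-aligned register; sizing is an assumption */
	static unsigned reg_safe_bm[REG_SPACE / 4 / 32];

	static void mark_safe(unsigned offset)
	{
		unsigned idx = offset >> 2;	/* registers are dword aligned */

		if (offset < REG_SPACE)
			reg_safe_bm[idx / 32] |= 1u << (idx % 32);
	}

	int main(int argc, char **argv)
	{
		char chip[16], name[64];
		unsigned offset, max_reg;
		FILE *f;

		if (argc < 2 || !(f = fopen(argv[1], "r")))
			return 1;
		/* header line: "<family> <last covered offset>" */
		if (fscanf(f, "%15s %x", chip, &max_reg) != 2)
			return 1;
		/* body lines: "<hex offset> <REGISTER_NAME>" */
		while (fscanf(f, "%x %63s", &offset, name) == 2)
			mark_safe(offset);
		fclose(f);
		printf("parsed %s, last offset 0x%X\n", chip, max_reg);
		return 0;
	}
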
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r100 b/drivers/gpu/drm/radeon/reg_srcs/r100
new file mode 100644
index 0000000..f7ee062
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r100
@@ -0,0 +1,105 @@
+r100 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1810 FOG_3D_TABLE_START
+0x1814 FOG_3D_TABLE_END
+0x1a14 FOG_TABLE_INDEX
+0x1a18 FOG_TABLE_DATA
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 SE_COORD_FMT
+0x1c60 PP_TXCBLEND_0
+0x1c64 PP_TXABLEND_0
+0x1c68 PP_TFACTOR_0
+0x1c78 PP_TXCBLEND_1
+0x1c7c PP_TXABLEND_1
+0x1c80 PP_TFACTOR_1
+0x1c90 PP_TXCBLEND_2
+0x1c94 PP_TXABLEND_2
+0x1c98 PP_TFACTOR_2
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1d40 PP_BORDER_COLOR0
+0x1d44 PP_BORDER_COLOR1
+0x1d48 PP_BORDER_COLOR2
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9C VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2140 SE_CNTL_STATUS
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
+0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
+0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
+0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
+0x2220 SE_TCL_MATERIAL_AMBIENT_RED
+0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
+0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
+0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
+0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
+0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
+0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
+0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
+0x2240 SE_TCL_MATERIAL_SPECULAR_RED
+0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
+0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
+0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
+0x2250 SE_TCL_SHININESS
+0x2254 SE_TCL_OUTPUT_VTX_FMT
+0x2258 SE_TCL_OUTPUT_VTX_SEL
+0x225c SE_TCL_MATRIX_SELECT_0
+0x2260 SE_TCL_MATRIX_SELECT_1
+0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
+0x2268 SE_TCL_TEXTURE_PROC_CTL
+0x226c SE_TCL_LIGHT_MODEL_CTL
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 SE_TCL_STATE_FLUSH
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x3290 RB3D_ZPASS_DATA
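
Once a list such as the r100 one above has been folded into a bitmap, the per-register permission test at command-stream checking time reduces to a constant-time bit probe. A sketch under the same assumptions as the parser above (the real per-family checkers are more involved; the caller is assumed to keep offset within the covered range):

	/* nonzero when a write to @offset is allowed by bitmap @bm */
	static int reg_is_safe(const unsigned *bm, unsigned offset)
	{
		unsigned idx = offset >> 2;

		return (bm[idx / 32] >> (idx % 32)) & 1;
	}
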
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
new file mode 100644
index 0000000..c29ac43
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -0,0 +1,186 @@
+r200 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 RE_CNTL
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1cd8 RE_SCISSOR_TL_0
+0x1cdc RE_SCISSOR_BR_0
+0x1ce0 RE_SCISSOR_TL_1
+0x1ce4 RE_SCISSOR_BR_1
+0x1ce8 RE_SCISSOR_TL_2
+0x1cec RE_SCISSOR_BR_2
+0x1d60 RB3D_DEPTHXY_OFFSET
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2080 SE_VAP_CNTL
+0x2090 SE_TCL_OUTPUT_VTX_FMT_0
+0x2094 SE_TCL_OUTPUT_VTX_FMT_1
+0x20b0 SE_VTE_CNTL
+0x2140 SE_CNTL_STATUS
+0x2180 SE_VTX_STATE_CNTL
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2230 SE_TCL_MATRIX_SEL_0
+0x2234 SE_TCL_MATRIX_SEL_1
+0x2238 SE_TCL_MATRIX_SEL_2
+0x223c SE_TCL_MATRIX_SEL_3
+0x2240 SE_TCL_MATRIX_SEL_4
+0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
+0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
+0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
+0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
+0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
+0x2268 SE_TCL_LIGHT_MODEL_CTL_0
+0x226c SE_TCL_LIGHT_MODEL_CTL_1
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x22a8 SE_TCL_TEX_PROC_CTL_2
+0x22ac SE_TCL_TEX_PROC_CTL_3
+0x22b0 SE_TCL_TEX_PROC_CTL_0
+0x22b4 SE_TCL_TEX_PROC_CTL_1
+0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
+0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
+0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
+0x2648 RE_POINTSIZE
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x26f0 RE_AUX_SCISSOR_CNTL
+0x2c14 PP_BORDER_COLOR_0
+0x2c34 PP_BORDER_COLOR_1
+0x2c54 PP_BORDER_COLOR_2
+0x2c74 PP_BORDER_COLOR_3
+0x2c94 PP_BORDER_COLOR_4
+0x2cb4 PP_BORDER_COLOR_5
+0x2cc4 PP_CNTL_X
+0x2cf8 PP_TRI_PERF
+0x2cfc PP_PERF_CNTL
+0x2d9c PP_TAM_DEBUG3
+0x2ee0 PP_TFACTOR_0
+0x2ee4 PP_TFACTOR_1
+0x2ee8 PP_TFACTOR_2
+0x2eec PP_TFACTOR_3
+0x2ef0 PP_TFACTOR_4
+0x2ef4 PP_TFACTOR_5
+0x2ef8 PP_TFACTOR_6
+0x2efc PP_TFACTOR_7
+0x2f00 PP_TXCBLEND_0
+0x2f04 PP_TXCBLEND2_0
+0x2f08 PP_TXABLEND_0
+0x2f0c PP_TXABLEND2_0
+0x2f10 PP_TXCBLEND_1
+0x2f14 PP_TXCBLEND2_1
+0x2f18 PP_TXABLEND_1
+0x2f1c PP_TXABLEND2_1
+0x2f20 PP_TXCBLEND_2
+0x2f24 PP_TXCBLEND2_2
+0x2f28 PP_TXABLEND_2
+0x2f2c PP_TXABLEND2_2
+0x2f30 PP_TXCBLEND_3
+0x2f34 PP_TXCBLEND2_3
+0x2f38 PP_TXABLEND_3
+0x2f3c PP_TXABLEND2_3
+0x2f40 PP_TXCBLEND_4
+0x2f44 PP_TXCBLEND2_4
+0x2f48 PP_TXABLEND_4
+0x2f4c PP_TXABLEND2_4
+0x2f50 PP_TXCBLEND_5
+0x2f54 PP_TXCBLEND2_5
+0x2f58 PP_TXABLEND_5
+0x2f5c PP_TXABLEND2_5
+0x2f60 PP_TXCBLEND_6
+0x2f64 PP_TXCBLEND2_6
+0x2f68 PP_TXABLEND_6
+0x2f6c PP_TXABLEND2_6
+0x2f70 PP_TXCBLEND_7
+0x2f74 PP_TXCBLEND2_7
+0x2f78 PP_TXABLEND_7
+0x2f7c PP_TXABLEND2_7
+0x2f80 PP_TXCBLEND_8
+0x2f84 PP_TXCBLEND2_8
+0x2f88 PP_TXABLEND_8
+0x2f8c PP_TXABLEND2_8
+0x2f90 PP_TXCBLEND_9
+0x2f94 PP_TXCBLEND2_9
+0x2f98 PP_TXABLEND_9
+0x2f9c PP_TXABLEND2_9
+0x2fa0 PP_TXCBLEND_10
+0x2fa4 PP_TXCBLEND2_10
+0x2fa8 PP_TXABLEND_10
+0x2fac PP_TXABLEND2_10
+0x2fb0 PP_TXCBLEND_11
+0x2fb4 PP_TXCBLEND2_11
+0x2fb8 PP_TXABLEND_11
+0x2fbc PP_TXABLEND2_11
+0x2fc0 PP_TXCBLEND_12
+0x2fc4 PP_TXCBLEND2_12
+0x2fc8 PP_TXABLEND_12
+0x2fcc PP_TXABLEND2_12
+0x2fd0 PP_TXCBLEND_13
+0x2fd4 PP_TXCBLEND2_13
+0x2fd8 PP_TXABLEND_13
+0x2fdc PP_TXABLEND2_13
+0x2fe0 PP_TXCBLEND_14
+0x2fe4 PP_TXCBLEND2_14
+0x2fe8 PP_TXABLEND_14
+0x2fec PP_TXABLEND2_14
+0x2ff0 PP_TXCBLEND_15
+0x2ff4 PP_TXCBLEND2_15
+0x2ff8 PP_TXABLEND_15
+0x2ffc PP_TXABLEND2_15
+0x3218 RB3D_BLENCOLOR
+0x321c RB3D_ABLENDCNTL
+0x3220 RB3D_CBLENDCNTL
+0x3290 RB3D_ZPASS_DATA
+
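
As a worked example against the r200 list above: offset 0x3218 (RB3D_BLENCOLOR) is listed and would pass the probe, while 0x3000 falls in the gap between 0x2FFC and 0x3218 and would be rejected. A hypothetical caller:

	if (reg_is_safe(reg_safe_bm, 0x3218))
		;	/* RB3D_BLENCOLOR: listed, write allowed */
	if (!reg_is_safe(reg_safe_bm, 0x3000))
		;	/* unlisted offset: write refused */
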
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
new file mode 100644
index 0000000..e8a1786
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -0,0 +1,714 @@
+r300 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
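
Because the bitmap for a family such as r300 is static, it can be computed once at build time and emitted as an initialized C array in a generated header, so the driver pays no parsing cost at runtime. A hedged sketch of that emission step; the array naming convention and four-words-per-line layout are assumptions, not the actual generated output:

	static void emit_header(FILE *out, const char *chip,
				const unsigned *bm, unsigned max_reg)
	{
		unsigned words = ((max_reg >> 2) + 32) / 32;	/* ceil(bits/32) */
		unsigned i;

		fprintf(out, "static const unsigned %s_reg_safe_bm[%u] = {\n",
			chip, words);
		for (i = 0; i < words; i++)
			fprintf(out, "\t0x%08X,%s", bm[i],
				(i % 4 == 3) ? "\n" : "");
		if (words % 4)
			fprintf(out, "\n");
		fprintf(out, "};\n");
	}
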
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 0000000..722074e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,780 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
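
Note that the header line carries the only per-family variation the tooling needs: r420 above declares "r420 0x4f60" while r600 below declares "r600 0x9400", and the r600-era lists simply print their offsets with more leading zeros. Both forms parse identically through the %x conversions in the earlier sketch, so a run might look like this (regparse being the hypothetical parser from earlier, not an in-tree tool):

	$ cc -o regparse regparse.c
	$ ./regparse reg_srcs/r420
	parsed r420, last offset 0x4F60
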
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 0000000..ec0c682
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,756 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008490 CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028354 SX_SURFACE_SYNC
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009604 TC_INVALIDATE
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009700 VC_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rn50 b/drivers/gpu/drm/radeon/reg_srcs/rn50
new file mode 100644
index 0000000..2687b63
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rn50
@@ -0,0 +1,30 @@
+rn50 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
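Each reg_srcs file follows the same two-column layout: a header line giving the ASIC name and an upper bound on the register range the generated table must cover (here "rn50 0x3294"), followed by one "offset NAME" pair per register that userspace command streams are allowed to write. The sketch below shows one way such a file could be read; it is an illustration under those assumptions, not the in-tree generator, and parse_reg_srcs() is a made-up name.

#include <stdio.h>

/*
 * Minimal sketch of a reg_srcs reader: header "<asic> <bound>", then
 * "<hex offset> <register name>" lines. parse_reg_srcs() is a made-up
 * name; the real table generator is the mkregtable host tool.
 */
static int parse_reg_srcs(const char *path)
{
	char name[128];
	unsigned int offset, bound;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	/* Skip the ASIC name, keep the declared upper bound. */
	if (fscanf(f, "%*s %x", &bound) != 1) {
		fclose(f);
		return -1;
	}
	while (fscanf(f, "%x %127s", &offset, name) == 2) {
		/* Offsets are dword registers inside the declared bound. */
		if ((offset & 3) || offset >= bound)
			fprintf(stderr, "%s: suspicious entry %s at 0x%04X\n",
				path, name, offset);
	}
	fclose(f);
	return 0;
}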
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
new file mode 100644
index 0000000..d9f6286
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -0,0 +1,780 @@
+rs600 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
new file mode 100644
index 0000000..78d5e99
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -0,0 +1,496 @@
+rv515 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x2218 VAP_TEX_TO_COLOR_CNTL
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
+0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
+0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
+0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
+0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
+0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
+0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
+0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
+0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
+0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
+0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
+0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
+0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
+0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
+0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
+0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
+0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
+0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
+0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
+0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
+0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
+0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
+0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
+0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
+0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
+0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
+0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
+0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
+0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
+0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
+0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
+0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411C PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4074 RS_IP_0
+0x4078 RS_IP_1
+0x407C RS_IP_2
+0x4080 RS_IP_3
+0x4084 RS_IP_4
+0x4088 RS_IP_5
+0x408C RS_IP_6
+0x4090 RS_IP_7
+0x4094 RS_IP_8
+0x4098 RS_IP_9
+0x409C RS_IP_10
+0x40A0 RS_IP_11
+0x40A4 RS_IP_12
+0x40A8 RS_IP_13
+0x40AC RS_IP_14
+0x40B0 RS_IP_15
+0x4320 RS_INST_0
+0x4324 RS_INST_1
+0x4328 RS_INST_2
+0x432C RS_INST_3
+0x4330 RS_INST_4
+0x4334 RS_INST_5
+0x4338 RS_INST_6
+0x433C RS_INST_7
+0x4340 RS_INST_8
+0x4344 RS_INST_9
+0x4348 RS_INST_10
+0x434C RS_INST_11
+0x4350 RS_INST_12
+0x4354 RS_INST_13
+0x4358 RS_INST_14
+0x435C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4250 GA_US_VECTOR_INDEX
+0x4254 GA_US_VECTOR_DATA
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4620 US_FC_BOOL_CONST
+0x4624 US_FC_CTRL
+0x4630 US_CODE_ADDR
+0x4634 US_CODE_RANGE
+0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4EF8 RB3D_CONSTANT_COLOR_AR
+0x4EFC RB3D_CONSTANT_COLOR_GB
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 0000000..48a913a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+/* Registers */
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
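+
+/* Illustrative usage (not part of the original header): S_* shifts a
+ * field value into place, G_* extracts it and C_* is the AND-mask that
+ * clears it, so a read-modify-write of the TOM top field would look
+ * like this (new_top being a hypothetical byte address):
+ *
+ *	tmp = RREG32(R_00015C_NB_TOM);
+ *	tmp &= C_00015C_MC_FB_TOP;
+ *	tmp |= S_00015C_MC_FB_TOP(new_top >> 16);
+ *	WREG32(R_00015C_NB_TOM, tmp);
+ */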
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 0000000..4121209
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rs400d.h"
+
+/* This file gathers functions specific to the rs400 and rs480 */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+	/* Check gart size */
+	switch (rdev->mc.gtt_size/(1024*1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		DRM_ERROR("Unable to use IGP GART size %uM\n",
+			  (unsigned)(rdev->mc.gtt_size >> 20));
+		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+		DRM_ERROR("Forcing to 32M GART size\n");
+		rdev->mc.gtt_size = 32 * 1024 * 1024;
+		return;
+	}
+}
+
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	unsigned int timeout = rdev->usec_timeout;
+
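+	/* Request an invalidate of the GART TLB/cache, then poll until the
+	 * hardware clears the invalidate bit or the usec timeout expires. */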
+	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+	do {
+		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+			break;
+		DRM_UDELAY(1);
+		timeout--;
+	} while (timeout > 0);
+	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		WARN(1, "RS400 GART already initialized\n");
+		return 0;
+	}
+	/* Check gart size */
+	switch (rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	if (rs400_debugfs_pcie_gart_info_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t size_reg;
+	uint32_t tmp;
+
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	/* Check gart size */
+	switch (rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+		size_reg = RS480_VA_SIZE_32MB;
+		break;
+	case 64:
+		size_reg = RS480_VA_SIZE_64MB;
+		break;
+	case 128:
+		size_reg = RS480_VA_SIZE_128MB;
+		break;
+	case 256:
+		size_reg = RS480_VA_SIZE_256MB;
+		break;
+	case 512:
+		size_reg = RS480_VA_SIZE_512MB;
+		break;
+	case 1024:
+		size_reg = RS480_VA_SIZE_1GB;
+		break;
+	case 2048:
+		size_reg = RS480_VA_SIZE_2GB;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* It should be fine to program it to max value */
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+	} else {
+		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+		WREG32(RS480_AGP_BASE_2, 0);
+	}
+	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	} else {
+		WREG32(RADEON_MC_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	}
+	/* Table should be in 32-bit address space so ignore bits above. */
+	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
+	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
+
+	WREG32_MC(RS480_GART_BASE, tmp);
+	/* TODO: more tweaking here */
+	WREG32_MC(RS480_GART_FEATURE_ID,
+		  (RS480_TLB_ENABLE |
+		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+	/* Disable snooping */
+	WREG32_MC(RS480_AGP_MODE_CNTL,
+		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+	/* Disable AGP mode */
+	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+	} else {
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+	}
+	/* Enable gart */
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+	rs400_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+void rs400_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs400_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+#define RS400_PTE_UNSNOOPED (1 << 0)
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	uint32_t entry;
+
+	entry = (lower_32_bits(addr) & PAGE_MASK) |
+		((upper_32_bits(addr) & 0xff) << 4);
+	if (flags & RADEON_GART_PAGE_READ)
+		entry |= RS400_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		entry |= RS400_PTE_WRITEABLE;
+	if (!(flags & RADEON_GART_PAGE_SNOOP))
+		entry |= RS400_PTE_UNSNOOPED;
+	return entry;
+}
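+
+/*
+ * Worked example (illustrative only, assuming 4 KiB pages): for
+ * addr = 0x123456000ULL with RADEON_GART_PAGE_READ, _WRITE and _SNOOP
+ * set, the base entry is (0x23456000 & PAGE_MASK) | (0x1 << 4)
+ * = 0x23456010; ORing in RS400_PTE_READABLE (bit 3) and
+ * RS400_PTE_WRITEABLE (bit 2) gives 0x2345601C, and the unsnooped bit
+ * stays clear because the snoop flag was set.
+ */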
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry)
+{
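+	/* RS400 PTEs are 32-bit little-endian words in system RAM;
+	 * rs400_gart_init sized the table as num_gpu_pages * 4 bytes. */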
+	u32 *gtt = rdev->gart.ptr;
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
+}
+
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void rs400_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct ? */
+	r420_pipes_init(rdev);
+	if (rs400_mc_wait_for_idle(rdev)) {
+		pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
+			RREG32(RADEON_MC_STATUS));
+	}
+}
+
+static void rs400_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
+	/* DDR for all cards after R300 & IGP */
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	r100_vram_init_sizes(rdev);
+	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+	r = RREG32(RS480_NB_MC_DATA);
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+	WREG32(RS480_NB_MC_DATA, (v));
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
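+
+/*
+ * Both accessors implement the indirect NB_MC index/data pair: the
+ * register number goes into RS480_NB_MC_INDEX (with the write-enable
+ * bit for stores), the payload moves through RS480_NB_MC_DATA and the
+ * index is parked at 0xff afterwards.  The RREG32_MC()/WREG32_MC()
+ * wrappers used above route here on RS400/RS480, so e.g.
+ * RREG32_MC(RS480_GART_BASE) ends up in rs400_mc_rreg().
+ */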
+
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
+		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32(RS690_HDP_FB_LOCATION);
+		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+	} else {
+		tmp = RREG32(RADEON_AGP_BASE);
+		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32(RS480_AGP_BASE_2);
+		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32(RADEON_MC_AGP_LOCATION);
+		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	}
+	tmp = RREG32_MC(RS480_GART_BASE);
+	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x5F);
+	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3B);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3C);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x30);
+	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x31);
+	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x32);
+	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x33);
+	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x34);
+	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x35);
+	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x36);
+	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x37);
+	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static void rs400_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+	r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r100_set_common_regs(rdev);
+
+	rs400_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* setup MC before calling post tables */
+	rs400_mc_program(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs400_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	r300_set_reg_safe(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 0000000..6d8bac5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 0000000..f16af11
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,1164 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to RS600, which is the IGP of
+ * the X1250/X1270 family supporting Intel CPUs (while RS690/RS740
+ * is the X1250/X1270 supporting AMD CPUs). The display engines are
+ * the Avivo ones, the BIOS is an ATOM BIOS, and the 3D blocks are
+ * those of the R4XX family. The GART is different from the RS400 one
+ * and is very close to the one of the R600 family (R600 likely being
+ * an evolution of the RS600 GART block).
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "atom.h"
+#include "rs600d.h"
+
+#include "rs600_reg_safe.h"
+
+static void rs600_gpu_init(struct radeon_device *rdev);
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* Return current update_pending status: */
+	return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+		AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
+}
+
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+		else
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	} else if (voltage->type == VOLTAGE_VDDC)
+		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
+
+	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+		}
+	} else {
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+	}
+	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+		} else
+			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		hdp_dyn_cntl &= ~HDP_FORCEON;
+	else
+		hdp_dyn_cntl |= HDP_FORCEON;
+	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+	/* mc_host_dyn seems to cause hangs from time to time */
+	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+	else
+		mc_host_dyn_cntl |= MC_HOST_FORCEON;
+	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+	else
+		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
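+	/* Arm the interrupt for the opposite of the current sense so the
+	 * next connect/disconnect transition raises an HPD interrupt. */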
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			break;
+		default:
+			break;
+		}
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			break;
+		default:
+			break;
+		}
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	pci_clear_master(rdev->pdev);
+	mdelay(1);
+	/* reset GA+VAP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset MC */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	rv515_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * GART.
+ */
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
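+	/* Pulse the L1 TLB / L2 cache invalidate bits (clear, set, clear)
+	 * and post the writes with a final read-back through the MC. */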
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+}
+
+static int rs600_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "RS600 GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r) {
+		return r;
+	}
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int rs600_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Enable bus master */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+	/* FIXME: setup default page */
+	WREG32_MC(R_000100_MC_PT0_CNTL,
+		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
+	for (i = 0; i < 19; i++) {
+		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
+	}
+	/* enable first context */
+	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+	/* setup the page table */
+	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
+	/* enable page tables */
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
+	rs600_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rs600_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* FIXME: disable out-of-GART access */
+	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rs600_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs600_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+	addr |= R600_PTE_SYSTEM;
+	if (flags & RADEON_GART_PAGE_VALID)
+		addr |= R600_PTE_VALID;
+	if (flags & RADEON_GART_PAGE_READ)
+		addr |= R600_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		addr |= R600_PTE_WRITEABLE;
+	if (flags & RADEON_GART_PAGE_SNOOP)
+		addr |= R600_PTE_SNOOPED;
+	return addr;
+}
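+
+/*
+ * Example (illustrative only): a valid, snooped, read/write system
+ * page at addr 0x12345000 yields
+ *	0x12345000 | R600_PTE_SYSTEM | R600_PTE_VALID |
+ *	R600_PTE_READABLE | R600_PTE_WRITEABLE | R600_PTE_SNOOPED,
+ * i.e. the 4 KiB-aligned address with the R600-style attribute bits
+ * in the low 12 bits.
+ */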
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry)
+{
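+	/* RS600 PTEs are 64-bit (rs600_gart_init sized the table as
+	 * num_gpu_pages * 8 bytes), hence the writeq() at offset i * 8. */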
+	void __iomem *ptr = (void *)rdev->gart.ptr;
+	writeq(entry, ptr + (i * 8));
+}
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	u32 hdmi0;
+	if (ASIC_IS_DCE2(rdev))
+		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	else
+		hdmi0 = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= S_000040_SW_INT_EN(1);
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
+	if (rdev->irq.afmt[0]) {
+		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	}
+	WREG32(R_000040_GEN_INT_CNTL, tmp);
+	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+	if (ASIC_IS_DCE2(rdev))
+		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+
+	/* posting read */
+	RREG32(R_000040_GEN_INT_CNTL);
+
+	return 0;
+}
+
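+/*
+ * Latch and acknowledge all pending interrupt sources.  Display and
+ * HDMI status are stashed in rdev->irq.stat_regs.r500 for the caller;
+ * the return value carries only the masked SW interrupt bit from
+ * GEN_INT_STATUS.
+ */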
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+	uint32_t irq_mask = S_000044_SW_INT(1);
+	u32 tmp;
+
+	if (G_000044_DISPLAY_INT_STAT(irqs)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006534_D1MODE_VBLANK_STATUS,
+				S_006534_D1MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+				S_006D34_D2MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	} else {
+		rdev->irq.stat_regs.r500.disp_int = 0;
+	}
+
+	if (ASIC_IS_DCE2(rdev)) {
+		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else
+		rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
+	if (irqs) {
+		WREG32(R_000044_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	WREG32(R_006540_DxMODE_INT_MASK, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	rs600_irq_ack(rdev);
+}
+
+int rs600_irq_process(struct radeon_device *rdev)
+{
+	u32 status, msi_rearm;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	status = rs600_irq_ack(rdev);
+	if (!status &&
+	    !rdev->irq.stat_regs.r500.disp_int &&
+	    !rdev->irq.stat_regs.r500.hdmi0_status) {
+		return IRQ_NONE;
+	}
+	while (status ||
+	       rdev->irq.stat_regs.r500.disp_int ||
+	       rdev->irq.stat_regs.r500.hdmi0_status) {
+		/* SW interrupt */
+		if (G_000044_SW_INT(status)) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_vblank(rdev, 0);
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_vblank(rdev, 1);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			queue_hdmi = true;
+			DRM_DEBUG("HDMI0\n");
+		}
+		status = rs600_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
+	if (rdev->msi_enabled) {
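+		/* MSI delivery needs re-arming on these chips: toggle the
+		 * rearm bit so the next interrupt can be delivered. */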
+		switch (rdev->family) {
+		case CHIP_RS600:
+		case CHIP_RS690:
+		case CHIP_RS740:
+			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+			WREG32(RADEON_BUS_CNTL, msi_rearm);
+			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
+	else
+		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs600_gpu_init(struct radeon_device *rdev)
+{
+	r420_pipes_init(rdev);
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+}
+
+static void rs600_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	base = RREG32_MC(R_000004_MC_FB_LOCATION);
+	base = G_000004_MC_FB_START(base) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs600_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+	/* FIXME: implement full support */
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
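+	/*
+	 * With display priority forced to the highest setting, pin the
+	 * priority marks of both CRTCs (A and B counts) to always-on instead
+	 * of computing watermarks.
+	 */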
+	if (rdev->disp_priority == 2) {
+		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	}
+}
+
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1));
+	r = RREG32(R_000074_MC_IND_DATA);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+	WREG32(R_000074_MC_IND_DATA, v);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
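+/*
+ * Illustrative sketch only (not a call site in this file): a
+ * read-modify-write of an indirect MC register through the pair above,
+ * using the S_/C_ field helpers from rs600d.h.  fb_start is a
+ * hypothetical local here.
+ *
+ *	u32 tmp = rs600_mc_rreg(rdev, R_000004_MC_FB_LOCATION);
+ *	tmp &= C_000004_MC_FB_START;
+ *	tmp |= S_000004_MC_FB_START(fb_start >> 16);
+ *	rs600_mc_wreg(rdev, R_000004_MC_FB_LOCATION, tmp);
+ */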
+
+static void rs600_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+}
+
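+/*
+ * Point the command-stream checker at the rs600 register-safety bitmap
+ * (rs600_reg_safe_bm is generated at build time from reg_srcs/rs600).
+ */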
+void rs600_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+
+	/* FIXME: What does AGP mean for such a chipset? */
+	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+	WREG32_MC(R_000006_AGP_BASE, 0);
+	WREG32_MC(R_000007_AGP_BASE_2, 0);
+	/* Program MC */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs600_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs600_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs600_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	rs600_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs600_gart_disable(rdev);
+	return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs600_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card has been posted */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs600_mc_init(rdev);
+	rs600_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs600_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs600_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 0000000..f1f8941
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+/* Registers */
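+/*
+ * Naming convention used throughout this header: for a field FOO of
+ * register R_xxxxxx, S_xxxxxx_FOO(x) shifts a value into the field,
+ * G_xxxxxx_FOO(x) extracts it from a register value, and C_xxxxxx_FOO is
+ * the mask that clears it.  A typical read-modify-write looks like
+ * (sketch only):
+ *
+ *	tmp = RREG32(R_000040_GEN_INT_CNTL);
+ *	tmp &= C_000040_SW_INT_EN;
+ *	tmp |= S_000040_SW_INT_EN(1);
+ *	WREG32(R_000040_GEN_INT_CNTL, tmp);
+ */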
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
+#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
+#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
+#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_DISPLAY_INT_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_DISPLAY_INT_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_DISPLAY_INT_STAT                    0xFFFFFFFE
+#define   S_000044_VGA_INT_STAT(x)                     (((x) & 0x1) << 1)
+#define   G_000044_VGA_INT_STAT(x)                     (((x) >> 1) & 0x1)
+#define   C_000044_VGA_INT_STAT                        0xFFFFFFFD
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_MC_PROBE_FAULT_STAT(x)              (((x) & 0x1) << 16)
+#define   G_000044_MC_PROBE_FAULT_STAT(x)              (((x) >> 16) & 0x1)
+#define   C_000044_MC_PROBE_FAULT_STAT                 0xFFFEFFFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_SCRATCH_INT_STAT(x)                 (((x) & 0x1) << 18)
+#define   G_000044_SCRATCH_INT_STAT(x)                 (((x) >> 18) & 0x1)
+#define   C_000044_SCRATCH_INT_STAT                    0xFFFBFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) & 0x1) << 20)
+#define   G_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) >> 20) & 0x1)
+#define   C_000044_ATI_OVERDRIVE_INT_STAT              0xFFEFFFFF
+#define   S_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) & 0x1) << 21)
+#define   G_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) >> 21) & 0x1)
+#define   C_000044_MC_PROTECTION_FAULT_STAT            0xFFDFFFFF
+#define   S_000044_RBBM_READ_INT_STAT(x)               (((x) & 0x1) << 22)
+#define   G_000044_RBBM_READ_INT_STAT(x)               (((x) >> 22) & 0x1)
+#define   C_000044_RBBM_READ_INT_STAT                  0xFFBFFFFF
+#define   S_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) & 0x1) << 23)
+#define   G_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) >> 23) & 0x1)
+#define   C_000044_CB_CONTEXT_SWITCH_STAT              0xFF7FFFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_IDCT_INT_STAT(x)                    (((x) & 0x1) << 27)
+#define   G_000044_IDCT_INT_STAT(x)                    (((x) >> 27) & 0x1)
+#define   C_000044_IDCT_INT_STAT                       0xF7FFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define R_00004C_BUS_CNTL                            0x00004C
+#define   S_00004C_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 14)
+#define   G_00004C_BUS_MASTER_DIS(x)                   (((x) >> 14) & 0x1)
+#define   C_00004C_BUS_MASTER_DIS                      0xFFFFBFFF
+#define   S_00004C_BUS_MSI_REARM(x)                    (((x) & 0x1) << 20)
+#define   G_00004C_BUS_MSI_REARM(x)                    (((x) >> 20) & 0x1)
+#define   C_00004C_BUS_MSI_REARM                       0xFFEFFFFF
+#define R_000070_MC_IND_INDEX                        0x000070
+#define   S_000070_MC_IND_ADDR(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000070_MC_IND_ADDR(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000070_MC_IND_ADDR                         0xFFFF0000
+#define   S_000070_MC_IND_SEQ_RBS_0(x)                 (((x) & 0x1) << 16)
+#define   G_000070_MC_IND_SEQ_RBS_0(x)                 (((x) >> 16) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_0                    0xFFFEFFFF
+#define   S_000070_MC_IND_SEQ_RBS_1(x)                 (((x) & 0x1) << 17)
+#define   G_000070_MC_IND_SEQ_RBS_1(x)                 (((x) >> 17) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_1                    0xFFFDFFFF
+#define   S_000070_MC_IND_SEQ_RBS_2(x)                 (((x) & 0x1) << 18)
+#define   G_000070_MC_IND_SEQ_RBS_2(x)                 (((x) >> 18) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_2                    0xFFFBFFFF
+#define   S_000070_MC_IND_SEQ_RBS_3(x)                 (((x) & 0x1) << 19)
+#define   G_000070_MC_IND_SEQ_RBS_3(x)                 (((x) >> 19) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_3                    0xFFF7FFFF
+#define   S_000070_MC_IND_AIC_RBS(x)                   (((x) & 0x1) << 20)
+#define   G_000070_MC_IND_AIC_RBS(x)                   (((x) >> 20) & 0x1)
+#define   C_000070_MC_IND_AIC_RBS                      0xFFEFFFFF
+#define   S_000070_MC_IND_CITF_ARB0(x)                 (((x) & 0x1) << 21)
+#define   G_000070_MC_IND_CITF_ARB0(x)                 (((x) >> 21) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB0                    0xFFDFFFFF
+#define   S_000070_MC_IND_CITF_ARB1(x)                 (((x) & 0x1) << 22)
+#define   G_000070_MC_IND_CITF_ARB1(x)                 (((x) >> 22) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB1                    0xFFBFFFFF
+#define   S_000070_MC_IND_WR_EN(x)                     (((x) & 0x1) << 23)
+#define   G_000070_MC_IND_WR_EN(x)                     (((x) >> 23) & 0x1)
+#define   C_000070_MC_IND_WR_EN                        0xFF7FFFFF
+#define   S_000070_MC_IND_RD_INV(x)                    (((x) & 0x1) << 24)
+#define   G_000070_MC_IND_RD_INV(x)                    (((x) >> 24) & 0x1)
+#define   C_000070_MC_IND_RD_INV                       0xFEFFFFFF
+#define R_000074_MC_IND_DATA                         0x000074
+#define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000074_MC_IND_DATA                         0x00000000
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT           0x0060A4
+#define   S_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0060A4_D1CRTC_FRAME_COUNT                  0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS                0x006534
+#define   S_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006534_D1MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006534_D1MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006534_D1MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006534_D1MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006534_D1MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006534_D1MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006534_D1MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006534_D1MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK                     0x006540
+#define   S_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 0)
+#define   G_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) >> 0) & 0x1)
+#define   C_006540_D1MODE_VBLANK_INT_MASK              0xFFFFFFFE
+#define   S_006540_D1MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 4)
+#define   G_006540_D1MODE_VLINE_INT_MASK(x)            (((x) >> 4) & 0x1)
+#define   C_006540_D1MODE_VLINE_INT_MASK               0xFFFFFFEF
+#define   S_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 8)
+#define   G_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) >> 8) & 0x1)
+#define   C_006540_D2MODE_VBLANK_INT_MASK              0xFFFFFEFF
+#define   S_006540_D2MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 12)
+#define   G_006540_D2MODE_VLINE_INT_MASK(x)            (((x) >> 12) & 0x1)
+#define   C_006540_D2MODE_VLINE_INT_MASK               0xFFFFEFFF
+#define   S_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 30)
+#define   G_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) >> 30) & 0x1)
+#define   C_006540_D1MODE_VBLANK_CP_SEL                0xBFFFFFFF
+#define   S_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 31)
+#define   G_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) >> 31) & 0x1)
+#define   C_006540_D2MODE_VBLANK_CP_SEL                0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT           0x0068A4
+#define   S_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0068A4_D2CRTC_FRAME_COUNT                  0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS                0x006D34
+#define   S_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006D34_D2MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006D34_D2MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006D34_D2MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006D34_D2MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS               0x007EDC
+#define   S_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 4)
+#define   G_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) >> 4) & 0x1)
+#define   C_007EDC_LB_D1_VBLANK_INTERRUPT              0xFFFFFFEF
+#define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
+#define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
+#define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
+#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
+#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
+#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
+#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
+#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
+#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
+#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
+#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
+#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
+#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007838_DACA_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
+#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
+#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007A38_DACB_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
+#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
+#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
+#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
+#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
+#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
+#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
+#define R_007404_HDMI0_STATUS                          0x007404
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) & 0x1) << 28)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) >> 28) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG               0xEFFFFFFF
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) & 0x1) << 29)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) >> 29) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG_INT           0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL            0x007408
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) & 0x1) << 28)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) >> 28) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK          0xEFFFFFFF
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) & 0x1) << 29)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) >> 29) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK           0xDFFFFFFF
+
+/* MC registers */
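+/*
+ * These offsets live in the indirect MC register space and are reached
+ * through R_000070_MC_IND_INDEX / R_000074_MC_IND_DATA (see
+ * rs600_mc_rreg()/rs600_mc_wreg() in rs600.c), which is why they are
+ * small indices rather than MMIO byte offsets.
+ */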
+#define R_000000_MC_STATUS                           0x000000
+#define   S_000000_MC_IDLE(x)                          (((x) & 0x1) << 0)
+#define   G_000000_MC_IDLE(x)                          (((x) >> 0) & 0x1)
+#define   C_000000_MC_IDLE                             0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000009_MC_CNTL1                            0x000009
+#define   S_000009_ENABLE_PAGE_TABLES(x)               (((x) & 0x1) << 26)
+#define   G_000009_ENABLE_PAGE_TABLES(x)               (((x) >> 26) & 0x1)
+#define   C_000009_ENABLE_PAGE_TABLES                  0xFBFFFFFF
+/* FIXME: don't know the various field sizes; need feedback from AMD */
+#define R_000100_MC_PT0_CNTL                         0x000100
+#define   S_000100_ENABLE_PT(x)                        (((x) & 0x1) << 0)
+#define   G_000100_ENABLE_PT(x)                        (((x) >> 0) & 0x1)
+#define   C_000100_ENABLE_PT                           0xFFFFFFFE
+#define   S_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_000100_EFFECTIVE_L2_CACHE_SIZE             0xFFFC7FFF
+#define   S_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) & 0x7) << 21)
+#define   G_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) >> 21) & 0x7)
+#define   C_000100_EFFECTIVE_L2_QUEUE_SIZE             0xFF1FFFFF
+#define   S_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) & 0x1) << 28)
+#define   G_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) >> 28) & 0x1)
+#define   C_000100_INVALIDATE_ALL_L1_TLBS              0xEFFFFFFF
+#define   S_000100_INVALIDATE_L2_CACHE(x)              (((x) & 0x1) << 29)
+#define   G_000100_INVALIDATE_L2_CACHE(x)              (((x) >> 29) & 0x1)
+#define   C_000100_INVALIDATE_L2_CACHE                 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL                0x000102
+#define   S_000102_ENABLE_PAGE_TABLE(x)                (((x) & 0x1) << 0)
+#define   G_000102_ENABLE_PAGE_TABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000102_ENABLE_PAGE_TABLE                   0xFFFFFFFE
+#define   S_000102_PAGE_TABLE_DEPTH(x)                 (((x) & 0x3) << 1)
+#define   G_000102_PAGE_TABLE_DEPTH(x)                 (((x) >> 1) & 0x3)
+#define   C_000102_PAGE_TABLE_DEPTH                    0xFFFFFFF9
+#define   V_000102_PAGE_TABLE_FLAT                     0
+/* R600 documentation suggests that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR     0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR    0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR   0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR      0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR     0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR       0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL                 0x00016C
+#define   S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define   G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define   C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE    0xFFFFFFFE
+#define   S_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) & 0x1) << 1)
+#define   G_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) >> 1) & 0x1)
+#define   C_00016C_TRANSLATION_MODE_OVERRIDE           0xFFFFFFFD
+#define   S_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) & 0x3) << 8)
+#define   G_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) >> 8) & 0x3)
+#define   C_00016C_SYSTEM_ACCESS_MODE_MASK             0xFFFFFCFF
+#define   V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY          0
+#define   V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP      1
+#define   V_00016C_SYSTEM_ACCESS_MODE_IN_SYS           2
+#define   V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS       3
+#define   S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) & 0x1) << 10)
+#define   G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) >> 10) & 0x1)
+#define   C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS     0xFFFFFBFF
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH  0
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define   S_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) & 0x7) << 11)
+#define   G_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) >> 11) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_CACHE_SIZE             0xFFFFC7FF
+#define   S_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) & 0x1) << 14)
+#define   G_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) >> 14) & 0x1)
+#define   C_00016C_ENABLE_FRAGMENT_PROCESSING          0xFFFFBFFF
+#define   S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_QUEUE_SIZE             0xFFFC7FFF
+#define   S_00016C_INVALIDATE_L1_TLB(x)                (((x) & 0x1) << 20)
+#define   G_00016C_INVALIDATE_L1_TLB(x)                (((x) >> 20) & 0x1)
+#define   C_00016C_INVALIDATE_L1_TLB                   0xFFEFFFFF
+
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+
+/* PLL regs */
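+/*
+ * Small offsets into the PLL indirect space; these are presumably
+ * accessed with the driver's RREG32_PLL()/WREG32_PLL() helpers rather
+ * than by direct MMIO.
+ */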
+#define GENERAL_PWRMGT                                 0x8
+#define   GLOBAL_PWRMGT_EN                             (1 << 0)
+#define   MOBILE_SU                                    (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH                         0xc
+#define   NORMAL_POWER_SCLK_HILEN(x)                   ((x) << 0)
+#define   NORMAL_POWER_SCLK_LOLEN(x)                   ((x) << 4)
+#define   REDUCED_POWER_SCLK_HILEN(x)                  ((x) << 8)
+#define   REDUCED_POWER_SCLK_LOLEN(x)                  ((x) << 12)
+#define   POWER_D1_SCLK_HILEN(x)                       ((x) << 16)
+#define   POWER_D1_SCLK_LOLEN(x)                       ((x) << 20)
+#define   STATIC_SCREEN_HILEN(x)                       ((x) << 24)
+#define   STATIC_SCREEN_LOLEN(x)                       ((x) << 28)
+#define DYN_SCLK_VOL_CNTL                              0xe
+#define   IO_CG_VOLTAGE_DROP                           (1 << 0)
+#define   VOLTAGE_DROP_SYNC                            (1 << 2)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 3)
+#define HDP_DYN_CNTL                                   0x10
+#define   HDP_FORCEON                                  (1 << 0)
+#define MC_HOST_DYN_CNTL                               0x1e
+#define   MC_HOST_FORCEON                              (1 << 0)
+#define DYN_BACKBIAS_CNTL                              0x29
+#define   IO_CG_BACKBIAS_EN                            (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL                     0x7ee0
+#define   PWRDN_WAIT_BUSY_OFF                          (1 << 0)
+#define   PWRDN_WAIT_PWRSEQ_OFF                        (1 << 4)
+#define   PWRDN_WAIT_PPLL_OFF                          (1 << 8)
+#define   PWRUP_WAIT_PPLL_ON                           (1 << 12)
+#define   PWRUP_WAIT_MEM_INIT_DONE                     (1 << 16)
+#define   PM_ASSERT_RESET                              (1 << 20)
+#define   PM_PWRDN_PPLL                                (1 << 24)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 0000000..1bae33e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "atom.h"
+#include "rs690d.h"
+
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+		if (G_000090_MC_SYSTEM_IDLE(tmp))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs690_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct? */
+	r420_pipes_init(rdev);
+	if (rs690_mc_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
+	}
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
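+/* A quick sketch of the dfixed_* helpers used below (values illustrative
+ * only): fixed20_12 is a 20.12 fixed-point type, so dfixed_const(x)
+ * stores x << 12, dfixed_mul()/dfixed_div() keep the 20.12 format, and
+ * dfixed_trunc() drops the fractional bits:
+ *
+ *	fixed20_12 a, b, r;
+ *	a.full = dfixed_const(700);	// 700.000
+ *	b.full = dfixed_const(100);	// 100.000
+ *	r.full = dfixed_div(a, b);	// 7.000, dfixed_trunc(r) == 7
+ */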
+void rs690_pm_info(struct radeon_device *rdev)
+{
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *info;
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	fixed20_12 tmp;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+		/* Get various system information from the BIOS */
+		switch (crev) {
+		case 1:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le16_to_cpu(info->info.usK8MemoryClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			else if (rdev->clock.default_mclk) {
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			} else
+				rdev->pm.igp_system_mclk.full = dfixed_const(400);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
+			break;
+		case 2:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
+			else if (rdev->clock.default_mclk)
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+			else
+				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
+			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
+			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+			break;
+		default:
+			/* We assume the slowest possible clock, i.e. the worst case */
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+			rdev->pm.igp_system_mclk.full = dfixed_const(200);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+			break;
+		}
+	} else {
+		/* We assume the slowest possible clock, i.e. the worst case */
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+		rdev->pm.igp_system_mclk.full = dfixed_const(200);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+	}
+	/* Compute various bandwidths */
+	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
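+	/* e.g. the 400 MHz fallback system clock above would give
+	 * k8_bandwidth = 400 * 4 = 1600 (figures illustrative only) */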
+	tmp.full = dfixed_const(4);
+	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
+	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
+	 *              = ht_clk * ht_width / 5
+	 */
+	tmp.full = dfixed_const(5);
+	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
+						rdev->pm.igp_ht_link_width);
+	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
+	if (tmp.full < rdev->pm.max_bandwidth.full) {
+		/* HT link is a limiting factor */
+		rdev->pm.max_bandwidth.full = tmp.full;
+	}
+	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
+	 *                    = (sideport_clk * 14) / 10
+	 */
+	tmp.full = dfixed_const(14);
+	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+	tmp.full = dfixed_const(10);
+	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
+}
+
+static void rs690_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+	uint32_t h_addr, l_addr;
+	unsigned long long k8_addr;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+	base = G_000100_MC_FB_START(base) << 16;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB.  Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
+
+	/* Use K8 direct mapping for fast fb access. */
+	rdev->fastfb_working = false;
+	h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+	l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+	k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+	if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+	{
+		/* FastFB shall be used with UMA memory. Here it is simply
+		 * disabled when sideport memory is present.
+		 */
+		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 
+					(unsigned long long)rdev->mc.aper_base, k8_addr);
+			rdev->mc.aper_base = (resource_size_t)k8_addr;
+			rdev->fastfb_working = true;
+		}
+	}
+
+	rs690_pm_info(rdev);
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+			      struct drm_display_mode *mode1,
+			      struct drm_display_mode *mode2)
+{
+	u32 tmp;
+
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
+	/*
+	 * Line Buffer Setup
+	 * There is a single line buffer shared by both display controllers.
+	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning can either be done
+	 * manually or via one of four preset allocations specified in bits 1:0:
+	 *  0 - line buffer is divided in half and shared between crtc
+	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+	 *  2 - D1 gets the whole buffer
+	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
+	 * allocation mode. In manual allocation mode, D1 always starts at 0,
+	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
+	 */
+	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
+	/* auto */
+	if (mode1 && mode2) {
+		if (mode1->hdisplay > mode2->hdisplay) {
+			if (mode1->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else if (mode2->hdisplay > mode1->hdisplay) {
+			if (mode2->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else
+			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+	} else if (mode1) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
+	} else if (mode2) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+	}
+	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+
+	/* Save number of lines the linebuffer leads before the scanout */
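+	/* e.g. an assumed 1920-pixel-wide mode with the 8192-pixel line
+	 * buffer guess gives DIV_ROUND_UP(8192, 1920) = 5 lead lines */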
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
+}
+
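+/* Per-CRTC display watermark state; everything except the raw FIFO depth
+ * is kept in 20.12 fixed point and feeds the priority mark computation
+ * below. */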
+struct rs690_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
+
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+					 struct radeon_crtc *crtc,
+					 struct rs690_watermark *wm,
+					 bool low)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+	fixed20_12 sclk, core_bandwidth, max_bandwidth;
+	u32 selected_sclk;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		selected_sclk = radeon_dpm_get_sclk(rdev, low);
+	else
+		selected_sclk = rdev->pm.current_sclk;
+
+	/* sclk in MHz */
+	a.full = dfixed_const(100);
+	sclk.full = dfixed_const(selected_sclk);
+	sclk.full = dfixed_div(sclk, a);
+	/* keep the MHz-scale sclk; rs690_compute_mode_priority() reads
+	 * wm->sclk when computing the fill rate */
+	wm->sclk.full = sclk.full;
+
+	/* core_bandwidth = sclk(MHz) * 16 */
+	a.full = dfixed_const(16);
+	core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
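+	/* e.g. an assumed 1920-pixel-wide mode with one line pair:
+	 * 1920 / 256 = 7.5, ceil(7.5 * 1) = 8 outstanding chunk requests */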
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
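+	/* e.g. an assumed 148.5 MHz pixel clock: pclk = 1000 / 148.5
+	 * ~= 6.73 ns, so ~0.149 pixels are consumed per ns with no scaling */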
+
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period(ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Maximum bandwidth is the minimum bandwidth of all components */
+	max_bandwidth = core_bandwidth;
+	if (rdev->mc.igp_sideport_enabled) {
+		if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+			rdev->pm.sideport_bandwidth.full)
+			max_bandwidth = rdev->pm.sideport_bandwidth;
+		read_delay_latency.full = dfixed_const(370 * 800);
+		a.full = dfixed_const(1000);
+		b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
+		read_delay_latency.full = dfixed_div(read_delay_latency, b);
+		read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+	} else {
+		if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+			rdev->pm.k8_bandwidth.full)
+			max_bandwidth = rdev->pm.k8_bandwidth;
+		if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
+			rdev->pm.ht_bandwidth.full)
+			max_bandwidth = rdev->pm.ht_bandwidth;
+		read_delay_latency.full = dfixed_const(5000);
+	}
+
+	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
+	a.full = dfixed_const(16);
+	sclk.full = dfixed_mul(max_bandwidth, a);
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_div(a, sclk);
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock(ns)
+	 */
+	a.full = dfixed_const(256 * 13);
+	chunk_time.full = dfixed_mul(sclk, a);
+	a.full = dfixed_const(10);
+	chunk_time.full = dfixed_div(chunk_time, a);
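+	/* e.g. with the illustrative 1600 bandwidth figure from rs690_pm_info:
+	 * sclk = 1000 / (1600 * 16) ~= 0.039 ns per clock, so one chunk takes
+	 * roughly 0.039 * 256 * 1.3 ~= 13 ns */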
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		a.full = dfixed_const(2);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
+	/* We assume the worst case of 32 bits (4 bytes) per pixel */
+	wm->dbpp.full = dfixed_const(4 * 8);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = dfixed_const(10);
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
+static void rs690_compute_mode_priority(struct radeon_device *rdev,
+					struct rs690_watermark *wm0,
+					struct rs690_watermark *wm1,
+					struct drm_display_mode *mode0,
+					struct drm_display_mode *mode1,
+					u32 *d1mode_priority_a_cnt,
+					u32 *d2mode_priority_a_cnt)
+{
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	*d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+	*d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
+		else
+			a.full = wm0->num_line_pair.full;
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
+		else
+			b.full = wm1->num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
+		else
+			a.full = wm0->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+	} else if (mode1) {
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
+		else
+			a.full = wm1->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1->sclk, a);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+	}
+}
+
+void rs690_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rs690_watermark wm0_high, wm0_low;
+	struct rs690_watermark wm1_high, wm1_low;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for displaypriority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+		tmp &= C_000104_MC_DISP0R_INIT_LAT;
+		tmp &= C_000104_MC_DISP1R_INIT_LAT;
+		if (mode0)
+			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+		if (mode1)
+			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+	}
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+		WREG32(R_006C9C_DCP_CONTROL, 0);
+	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+		WREG32(R_006C9C_DCP_CONTROL, 2);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
+
+	tmp = (wm0_high.lb_request_fifo_depth - 1);
+	tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
+	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
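+	/* e.g. two CRTCs that each computed a FIFO depth of 8 (see the sketch
+	 * in rs690_crtc_bandwidth_compute) program 0x7 | (0x7 << 16) here */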
+
+	rs690_compute_mode_priority(rdev,
+				    &wm0_high, &wm1_high,
+				    mode0, mode1,
+				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
+	rs690_compute_mode_priority(rdev,
+				    &wm0_low, &wm1_low,
+				    mode0, mode1,
+				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
+
+	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
+	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
+}
+
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+	r = RREG32(R_00007C_MC_DATA);
+	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+		S_000078_MC_IND_WR_EN(1));
+	WREG32(R_00007C_MC_DATA, v);
+	WREG32(R_000078_MC_INDEX, 0x7F);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
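+/* Usage sketch for the indirect MC access pair above (illustrative):
+ *
+ *	u32 fb_loc = rs690_mc_rreg(rdev, R_000100_MCCFG_FB_LOCATION);
+ *	u64 fb_start = (u64)G_000100_MC_FB_START(fb_loc) << 16;
+ *
+ * which mirrors what rs690_mc_init() does via RREG32_MC().
+ */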
+
+static void rs690_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs690_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Program the MC; the address space here is limited to 32 bits */
+	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs690_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs690_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure GART is not working */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disabling VGA needs to use a VGA request */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs690_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 0000000..8af3ccf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+/* Registers */
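+/* Field macro convention (applies throughout this file): S_* shifts a
+ * value into field position, G_* extracts a field from a register word,
+ * and C_* is the AND-mask that clears it, e.g. (illustrative):
+ *
+ *	reg = (reg & C_000078_MC_IND_ADDR) | S_000078_MC_IND_ADDR(addr);
+ */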
+#define R_00001E_K8_FB_LOCATION                      0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL                    0x00005F
+#define   G_00005F_K8_ADDR_EXT(x)                      (((x) >> 0) & 0xFF)
+#define R_000078_MC_INDEX                            0x000078
+#define   S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
+#define   G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
+#define   C_000078_MC_IND_ADDR                         0xFFFFFE00
+#define   S_000078_MC_IND_WR_EN(x)                     (((x) & 0x1) << 9)
+#define   G_000078_MC_IND_WR_EN(x)                     (((x) >> 9) & 0x1)
+#define   C_000078_MC_IND_WR_EN                        0xFFFFFDFF
+#define R_00007C_MC_DATA                             0x00007C
+#define   S_00007C_MC_DATA(x)                          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00007C_MC_DATA(x)                          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00007C_MC_DATA                             0x00000000
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT                  0x006520
+#define   S_006520_DC_LB_MEMORY_SPLIT(x)               (((x) & 0x3) << 0)
+#define   G_006520_DC_LB_MEMORY_SPLIT(x)               (((x) >> 0) & 0x3)
+#define   C_006520_DC_LB_MEMORY_SPLIT                  0xFFFFFFFC
+#define   S_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) & 0x1) << 2)
+#define   G_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) >> 2) & 0x1)
+#define   C_006520_DC_LB_MEMORY_SPLIT_MODE             0xFFFFFFFB
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF    0
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q      1
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY          2
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q      3
+#define   S_006520_DC_LB_DISP1_END_ADR(x)              (((x) & 0x7FF) << 4)
+#define   G_006520_DC_LB_DISP1_END_ADR(x)              (((x) >> 4) & 0x7FF)
+#define   C_006520_DC_LB_DISP1_END_ADR                 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL                         0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING              0x006D58
+#define   S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 0)
+#define   G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) >> 0) & 0xF)
+#define   C_006D58_LB_D1_MAX_REQ_OUTSTANDING           0xFFFFFFF0
+#define   S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 16)
+#define   G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) >> 16) & 0xF)
+#define   C_006D58_LB_D2_MAX_REQ_OUTSTANDING           0xFFF0FFFF
+
+
+#define R_000090_MC_SYSTEM_STATUS                    0x000090
+#define   S_000090_MC_SYSTEM_IDLE(x)                   (((x) & 0x1) << 0)
+#define   G_000090_MC_SYSTEM_IDLE(x)                   (((x) >> 0) & 0x1)
+#define   C_000090_MC_SYSTEM_IDLE                      0xFFFFFFFE
+#define   S_000090_MC_SEQUENCER_IDLE(x)                (((x) & 0x1) << 1)
+#define   G_000090_MC_SEQUENCER_IDLE(x)                (((x) >> 1) & 0x1)
+#define   C_000090_MC_SEQUENCER_IDLE                   0xFFFFFFFD
+#define   S_000090_MC_ARBITER_IDLE(x)                  (((x) & 0x1) << 2)
+#define   G_000090_MC_ARBITER_IDLE(x)                  (((x) >> 2) & 0x1)
+#define   C_000090_MC_ARBITER_IDLE                     0xFFFFFFFB
+#define   S_000090_MC_SELECT_PM(x)                     (((x) & 0x1) << 3)
+#define   G_000090_MC_SELECT_PM(x)                     (((x) >> 3) & 0x1)
+#define   C_000090_MC_SELECT_PM                        0xFFFFFFF7
+#define   S_000090_RESERVED4(x)                        (((x) & 0xF) << 4)
+#define   G_000090_RESERVED4(x)                        (((x) >> 4) & 0xF)
+#define   C_000090_RESERVED4                           0xFFFFFF0F
+#define   S_000090_RESERVED8(x)                        (((x) & 0xF) << 8)
+#define   G_000090_RESERVED8(x)                        (((x) >> 8) & 0xF)
+#define   C_000090_RESERVED8                           0xFFFFF0FF
+#define   S_000090_RESERVED12(x)                       (((x) & 0xF) << 12)
+#define   G_000090_RESERVED12(x)                       (((x) >> 12) & 0xF)
+#define   C_000090_RESERVED12                          0xFFFF0FFF
+#define   S_000090_MCA_INIT_EXECUTED(x)                (((x) & 0x1) << 16)
+#define   G_000090_MCA_INIT_EXECUTED(x)                (((x) >> 16) & 0x1)
+#define   C_000090_MCA_INIT_EXECUTED                   0xFFFEFFFF
+#define   S_000090_MCA_IDLE(x)                         (((x) & 0x1) << 17)
+#define   G_000090_MCA_IDLE(x)                         (((x) >> 17) & 0x1)
+#define   C_000090_MCA_IDLE                            0xFFFDFFFF
+#define   S_000090_MCA_SEQ_IDLE(x)                     (((x) & 0x1) << 18)
+#define   G_000090_MCA_SEQ_IDLE(x)                     (((x) >> 18) & 0x1)
+#define   C_000090_MCA_SEQ_IDLE                        0xFFFBFFFF
+#define   S_000090_MCA_ARB_IDLE(x)                     (((x) & 0x1) << 19)
+#define   G_000090_MCA_ARB_IDLE(x)                     (((x) >> 19) & 0x1)
+#define   C_000090_MCA_ARB_IDLE                        0xFFF7FFFF
+#define   S_000090_RESERVED20(x)                       (((x) & 0xFFF) << 20)
+#define   G_000090_RESERVED20(x)                       (((x) >> 20) & 0xFFF)
+#define   C_000090_RESERVED20                          0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION                   0x000100
+#define   S_000100_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000100_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000100_MC_FB_START                         0xFFFF0000
+#define   S_000100_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000100_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000100_MC_FB_TOP                           0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER              0x000104
+#define   S_000104_MC_CPR_INIT_LAT(x)                  (((x) & 0xF) << 0)
+#define   G_000104_MC_CPR_INIT_LAT(x)                  (((x) >> 0) & 0xF)
+#define   C_000104_MC_CPR_INIT_LAT                     0xFFFFFFF0
+#define   S_000104_MC_VF_INIT_LAT(x)                   (((x) & 0xF) << 4)
+#define   G_000104_MC_VF_INIT_LAT(x)                   (((x) >> 4) & 0xF)
+#define   C_000104_MC_VF_INIT_LAT                      0xFFFFFF0F
+#define   S_000104_MC_DISP0R_INIT_LAT(x)               (((x) & 0xF) << 8)
+#define   G_000104_MC_DISP0R_INIT_LAT(x)               (((x) >> 8) & 0xF)
+#define   C_000104_MC_DISP0R_INIT_LAT                  0xFFFFF0FF
+#define   S_000104_MC_DISP1R_INIT_LAT(x)               (((x) & 0xF) << 12)
+#define   G_000104_MC_DISP1R_INIT_LAT(x)               (((x) >> 12) & 0xF)
+#define   C_000104_MC_DISP1R_INIT_LAT                  0xFFFF0FFF
+#define   S_000104_MC_FIXED_INIT_LAT(x)                (((x) & 0xF) << 16)
+#define   G_000104_MC_FIXED_INIT_LAT(x)                (((x) >> 16) & 0xF)
+#define   C_000104_MC_FIXED_INIT_LAT                   0xFFF0FFFF
+#define   S_000104_MC_E2R_INIT_LAT(x)                  (((x) & 0xF) << 20)
+#define   G_000104_MC_E2R_INIT_LAT(x)                  (((x) >> 20) & 0xF)
+#define   C_000104_MC_E2R_INIT_LAT                     0xFF0FFFFF
+#define   S_000104_SAME_PAGE_PRIO(x)                   (((x) & 0xF) << 24)
+#define   G_000104_SAME_PAGE_PRIO(x)                   (((x) >> 24) & 0xF)
+#define   C_000104_SAME_PAGE_PRIO                      0xF0FFFFFF
+#define   S_000104_MC_GLOBW_INIT_LAT(x)                (((x) & 0xF) << 28)
+#define   G_000104_MC_GLOBW_INIT_LAT(x)                (((x) >> 28) & 0xF)
+#define   C_000104_MC_GLOBW_INIT_LAT                   0x0FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
new file mode 100644
index 0000000..694b7b3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -0,0 +1,1077 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rs780d.h"
+#include "r600_dpm.h"
+#include "rs780_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
+{
+	struct igp_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static struct igp_power_info *rs780_get_pi(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct radeon_mode_info *minfo = &rdev->mode_info;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+
+	/* defaults */
+	pi->crtc_id = 0;
+	pi->refresh_rate = 60;
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc = (struct drm_crtc *)minfo->crtcs[i];
+		if (crtc && crtc->enabled) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			pi->crtc_id = radeon_crtc->crtc_id;
+			if (crtc->mode.htotal && crtc->mode.vtotal)
+				pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
+			break;
+		}
+	}
+}
+
+static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable);
+
+static int rs780_initialize_dpm_power_state(struct radeon_device *rdev,
+					    struct radeon_ps *boot_ps)
+{
+	struct atom_clock_dividers dividers;
+	struct igp_ps *default_state = rs780_get_ps(boot_ps);
+	int i, ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     default_state->sclk_low, false, &dividers);
+	if (ret)
+		return ret;
+
+	r600_engine_clock_entry_set_reference_divider(rdev, 0, dividers.ref_div);
+	r600_engine_clock_entry_set_feedback_divider(rdev, 0, dividers.fb_div);
+	r600_engine_clock_entry_set_post_divider(rdev, 0, dividers.post_div);
+
+	if (dividers.enable_post_div)
+		r600_engine_clock_entry_enable_post_divider(rdev, 0, true);
+	else
+		r600_engine_clock_entry_enable_post_divider(rdev, 0, false);
+
+	r600_engine_clock_entry_set_step_time(rdev, 0, R600_SST_DFLT);
+	r600_engine_clock_entry_enable_pulse_skipping(rdev, 0, false);
+
+	r600_engine_clock_entry_enable(rdev, 0, true);
+	for (i = 1; i < R600_PM_NUMBER_OF_SCLKS; i++)
+		r600_engine_clock_entry_enable(rdev, i, false);
+
+	r600_enable_mclk_control(rdev, false);
+	r600_voltage_control_enable_pins(rdev, 0);
+
+	return 0;
+}
+
+static int rs780_initialize_dpm_parameters(struct radeon_device *rdev,
+					   struct radeon_ps *boot_ps)
+{
+	int ret = 0;
+	int i;
+
+	r600_set_bsp(rdev, R600_BSU_DFLT, R600_BSP_DFLT);
+
+	r600_set_at(rdev, 0, 0, 0, 0);
+
+	r600_set_git(rdev, R600_GICST_DFLT);
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		r600_set_tc(rdev, i, 0, 0);
+
+	r600_select_td(rdev, R600_TD_DFLT);
+	r600_set_vrc(rdev, 0);
+
+	r600_set_tpu(rdev, R600_TPU_DFLT);
+	r600_set_tpc(rdev, R600_TPC_DFLT);
+
+	r600_set_sstu(rdev, R600_SSTU_DFLT);
+	r600_set_sst(rdev, R600_SST_DFLT);
+
+	r600_set_fctu(rdev, R600_FCTU_DFLT);
+	r600_set_fct(rdev, R600_FCT_DFLT);
+
+	r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
+	r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
+	r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
+	r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
+	r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
+
+	r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
+	r600_vid_rt_set_vrt(rdev, R600_VOLTAGERESPONSETIME_DFLT);
+	r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
+
+	ret = rs780_initialize_dpm_power_state(rdev, boot_ps);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW,     0);
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM,  0);
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH,    0);
+
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW,    0);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH,   0);
+
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW,    0);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH,   0);
+
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,    R600_DISPLAY_WATERMARK_HIGH);
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM, R600_DISPLAY_WATERMARK_HIGH);
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH,   R600_DISPLAY_WATERMARK_HIGH);
+
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_CTXSW, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+
+	r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_LOW);
+
+	r600_set_vrc(rdev, RS780_CGFTV_DFLT);
+
+	return ret;
+}
+
+static void rs780_start_dpm(struct radeon_device *rdev)
+{
+	r600_enable_sclk_control(rdev, false);
+	r600_enable_mclk_control(rdev, false);
+
+	r600_dynamicpm_enable(rdev, true);
+
+	radeon_wait_for_vblank(rdev, 0);
+	radeon_wait_for_vblank(rdev, 1);
+
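+	/* cycle the spll through bypass twice, waiting for each transition to settle */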
+	r600_enable_spll_bypass(rdev, true);
+	r600_wait_for_spll_change(rdev);
+	r600_enable_spll_bypass(rdev, false);
+	r600_wait_for_spll_change(rdev);
+
+	r600_enable_spll_bypass(rdev, true);
+	r600_wait_for_spll_change(rdev);
+	r600_enable_spll_bypass(rdev, false);
+	r600_wait_for_spll_change(rdev);
+
+	r600_enable_sclk_control(rdev, true);
+}
+
+static void rs780_preset_ranges_slow_clk_fbdiv_en(struct radeon_device *rdev)
+{
+	WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1, RANGE_SLOW_CLK_FEEDBACK_DIV_EN,
+		 ~RANGE_SLOW_CLK_FEEDBACK_DIV_EN);
+
+	WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1,
+		 RANGE0_SLOW_CLK_FEEDBACK_DIV(RS780_SLOWCLKFEEDBACKDIV_DFLT),
+		 ~RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK);
+}
+
+static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
+{
+	u32 fbdiv = (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+
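+	/* seed the throttling logic with the feedback divider currently programmed in the spll */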
+	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fbdiv),
+		 ~STARTING_FEEDBACK_DIV_MASK);
+
+	WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fbdiv),
+		 ~FORCED_FEEDBACK_DIV_MASK);
+
+	WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+}
+
+static void rs780_voltage_scaling_init(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct drm_device *dev = rdev->ddev;
+	u32 fv_throt_pwm_fb_div_range[3];
+	u32 fv_throt_pwm_range[4];
+
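+	/* pwm feedback divider defaults differ per IGP variant, keyed off the pci device id */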
+	if (dev->pdev->device == 0x9614) {
+		fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
+		fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
+		fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
+	} else if ((dev->pdev->device == 0x9714) ||
+		   (dev->pdev->device == 0x9715)) {
+		fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
+		fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
+		fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
+	} else {
+		fv_throt_pwm_fb_div_range[0] = RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT;
+		fv_throt_pwm_fb_div_range[1] = RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT;
+		fv_throt_pwm_fb_div_range[2] = RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT;
+	}
+
+	if (pi->pwm_voltage_control) {
+		fv_throt_pwm_range[0] = pi->min_voltage;
+		fv_throt_pwm_range[1] = pi->min_voltage;
+		fv_throt_pwm_range[2] = pi->max_voltage;
+		fv_throt_pwm_range[3] = pi->max_voltage;
+	} else {
+		fv_throt_pwm_range[0] = pi->invert_pwm_required ?
+			RS780_FVTHROTPWMRANGE3_GPIO_DFLT : RS780_FVTHROTPWMRANGE0_GPIO_DFLT;
+		fv_throt_pwm_range[1] = pi->invert_pwm_required ?
+			RS780_FVTHROTPWMRANGE2_GPIO_DFLT : RS780_FVTHROTPWMRANGE1_GPIO_DFLT;
+		fv_throt_pwm_range[2] = pi->invert_pwm_required ?
+			RS780_FVTHROTPWMRANGE1_GPIO_DFLT : RS780_FVTHROTPWMRANGE2_GPIO_DFLT;
+		fv_throt_pwm_range[3] = pi->invert_pwm_required ?
+			RS780_FVTHROTPWMRANGE0_GPIO_DFLT : RS780_FVTHROTPWMRANGE3_GPIO_DFLT;
+	}
+
+	WREG32_P(FVTHROT_PWM_CTRL_REG0,
+		 STARTING_PWM_HIGHTIME(pi->max_voltage),
+		 ~STARTING_PWM_HIGHTIME_MASK);
+
+	WREG32_P(FVTHROT_PWM_CTRL_REG0,
+		 NUMBER_OF_CYCLES_IN_PERIOD(pi->num_of_cycles_in_period),
+		 ~NUMBER_OF_CYCLES_IN_PERIOD_MASK);
+
+	WREG32_P(FVTHROT_PWM_CTRL_REG0, FORCE_STARTING_PWM_HIGHTIME,
+		 ~FORCE_STARTING_PWM_HIGHTIME);
+
+	if (pi->invert_pwm_required)
+		WREG32_P(FVTHROT_PWM_CTRL_REG0, INVERT_PWM_WAVEFORM, ~INVERT_PWM_WAVEFORM);
+	else
+		WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~INVERT_PWM_WAVEFORM);
+
+	rs780_voltage_scaling_enable(rdev, true);
+
+	WREG32(FVTHROT_PWM_CTRL_REG1,
+	       (MIN_PWM_HIGHTIME(pi->min_voltage) |
+		MAX_PWM_HIGHTIME(pi->max_voltage)));
+
+	WREG32(FVTHROT_PWM_US_REG0, RS780_FVTHROTPWMUSREG0_DFLT);
+	WREG32(FVTHROT_PWM_US_REG1, RS780_FVTHROTPWMUSREG1_DFLT);
+	WREG32(FVTHROT_PWM_DS_REG0, RS780_FVTHROTPWMDSREG0_DFLT);
+	WREG32(FVTHROT_PWM_DS_REG1, RS780_FVTHROTPWMDSREG1_DFLT);
+
+	WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
+		 RANGE0_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[0]),
+		 ~RANGE0_PWM_FEEDBACK_DIV_MASK);
+
+	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG2,
+	       (RANGE1_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[1]) |
+		RANGE2_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[2])));
+
+	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG3,
+	       (RANGE0_PWM(fv_throt_pwm_range[1]) |
+		RANGE1_PWM(fv_throt_pwm_range[2])));
+	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG4,
+	       (RANGE2_PWM(fv_throt_pwm_range[1]) |
+		RANGE3_PWM(fv_throt_pwm_range[2])));
+}
+
+static void rs780_clk_scaling_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT | ENABLE_FV_UPDATE,
+			 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
+	else
+		WREG32_P(FVTHROT_CNTRL_REG, 0,
+			 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
+}
+
+static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT_IO, ~ENABLE_FV_THROT_IO);
+	else
+		WREG32_P(FVTHROT_CNTRL_REG, 0, ~ENABLE_FV_THROT_IO);
+}
+
+static void rs780_set_engine_clock_wfc(struct radeon_device *rdev)
+{
+	WREG32(FVTHROT_UTC0, RS780_FVTHROTUTC0_DFLT);
+	WREG32(FVTHROT_UTC1, RS780_FVTHROTUTC1_DFLT);
+	WREG32(FVTHROT_UTC2, RS780_FVTHROTUTC2_DFLT);
+	WREG32(FVTHROT_UTC3, RS780_FVTHROTUTC3_DFLT);
+	WREG32(FVTHROT_UTC4, RS780_FVTHROTUTC4_DFLT);
+
+	WREG32(FVTHROT_DTC0, RS780_FVTHROTDTC0_DFLT);
+	WREG32(FVTHROT_DTC1, RS780_FVTHROTDTC1_DFLT);
+	WREG32(FVTHROT_DTC2, RS780_FVTHROTDTC2_DFLT);
+	WREG32(FVTHROT_DTC3, RS780_FVTHROTDTC3_DFLT);
+	WREG32(FVTHROT_DTC4, RS780_FVTHROTDTC4_DFLT);
+}
+
+static void rs780_set_engine_clock_sc(struct radeon_device *rdev)
+{
+	WREG32_P(FVTHROT_FBDIV_REG2,
+		 FB_DIV_TIMER_VAL(RS780_FBDIVTIMERVAL_DFLT),
+		 ~FB_DIV_TIMER_VAL_MASK);
+
+	WREG32_P(FVTHROT_CNTRL_REG,
+		 REFRESH_RATE_DIVISOR(0) | MINIMUM_CIP(0xf),
+		 ~(REFRESH_RATE_DIVISOR_MASK | MINIMUM_CIP_MASK));
+}
+
+static void rs780_set_engine_clock_tdc(struct radeon_device *rdev)
+{
+	WREG32_P(FVTHROT_CNTRL_REG, 0, ~(FORCE_TREND_SEL | TREND_SEL_MODE));
+}
+
+static void rs780_set_engine_clock_ssc(struct radeon_device *rdev)
+{
+	WREG32(FVTHROT_FB_US_REG0, RS780_FVTHROTFBUSREG0_DFLT);
+	WREG32(FVTHROT_FB_US_REG1, RS780_FVTHROTFBUSREG1_DFLT);
+	WREG32(FVTHROT_FB_DS_REG0, RS780_FVTHROTFBDSREG0_DFLT);
+	WREG32(FVTHROT_FB_DS_REG1, RS780_FVTHROTFBDSREG1_DFLT);
+
+	WREG32_P(FVTHROT_FBDIV_REG1, MAX_FEEDBACK_STEP(1), ~MAX_FEEDBACK_STEP_MASK);
+}
+
+static void rs780_program_at(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
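+	/* scale the idle target and callback thresholds by the current refresh rate */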
+	WREG32(FVTHROT_TARGET_REG, 30000000 / pi->refresh_rate);
+	WREG32(FVTHROT_CB1, 1000000 * 5 / pi->refresh_rate);
+	WREG32(FVTHROT_CB2, 1000000 * 10 / pi->refresh_rate);
+	WREG32(FVTHROT_CB3, 1000000 * 30 / pi->refresh_rate);
+	WREG32(FVTHROT_CB4, 1000000 * 50 / pi->refresh_rate);
+}
+
+static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
+{
+	WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
+}
+
+static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
+{
+	struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+	if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
+	    (current_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
+		return;
+
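+	/* force the new pwm high-time while the spll is held in bypass */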
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+	udelay(1);
+
+	WREG32_P(FVTHROT_PWM_CTRL_REG0,
+		 STARTING_PWM_HIGHTIME(voltage),
+		 ~STARTING_PWM_HIGHTIME_MASK);
+
+	WREG32_P(FVTHROT_PWM_CTRL_REG0,
+		 FORCE_STARTING_PWM_HIGHTIME, ~FORCE_STARTING_PWM_HIGHTIME);
+
+	WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1, 0,
+		 ~RANGE_PWM_FEEDBACK_DIV_EN);
+
+	udelay(1);
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
+static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
+{
+	struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+	if (current_state->sclk_low == current_state->sclk_high)
+		return;
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+	WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
+		 ~FORCED_FEEDBACK_DIV_MASK);
+	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
+		 ~STARTING_FEEDBACK_DIV_MASK);
+	WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+
+	udelay(100);
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
+static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
+					  struct radeon_ps *new_ps,
+					  struct radeon_ps *old_ps)
+{
+	struct atom_clock_dividers min_dividers, max_dividers, current_max_dividers;
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_ps *old_state = rs780_get_ps(old_ps);
+	int ret;
+
+	if ((new_state->sclk_high == old_state->sclk_high) &&
+	    (new_state->sclk_low == old_state->sclk_low))
+		return 0;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     new_state->sclk_low, false, &min_dividers);
+	if (ret)
+		return ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     new_state->sclk_high, false, &max_dividers);
+	if (ret)
+		return ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     old_state->sclk_high, false, &current_max_dividers);
+	if (ret)
+		return ret;
+
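+	/* only the feedback divider is scaled at runtime; the reference and
+	 * post dividers must match across the old and new states
+	 */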
+	if ((min_dividers.ref_div != max_dividers.ref_div) ||
+	    (min_dividers.post_div != max_dividers.post_div) ||
+	    (max_dividers.ref_div != current_max_dividers.ref_div) ||
+	    (max_dividers.post_div != current_max_dividers.post_div))
+		return -EINVAL;
+
+	rs780_force_fbdiv(rdev, max_dividers.fb_div);
+
+	if (max_dividers.fb_div > min_dividers.fb_div) {
+		WREG32_P(FVTHROT_FBDIV_REG0,
+			 MIN_FEEDBACK_DIV(min_dividers.fb_div) |
+			 MAX_FEEDBACK_DIV(max_dividers.fb_div),
+			 ~(MIN_FEEDBACK_DIV_MASK | MAX_FEEDBACK_DIV_MASK));
+
+		WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+	}
+
+	return 0;
+}
+
+static void rs780_set_engine_clock_spc(struct radeon_device *rdev,
+				       struct radeon_ps *new_ps,
+				       struct radeon_ps *old_ps)
+{
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_ps *old_state = rs780_get_ps(old_ps);
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
+	if ((new_state->sclk_high == old_state->sclk_high) &&
+	    (new_state->sclk_low == old_state->sclk_low))
+		return;
+
+	if (pi->crtc_id == 0)
+		WREG32_P(CG_INTGFX_MISC, 0, ~FVTHROT_VBLANK_SEL);
+	else
+		WREG32_P(CG_INTGFX_MISC, FVTHROT_VBLANK_SEL, ~FVTHROT_VBLANK_SEL);
+}
+
+static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
+					      struct radeon_ps *new_ps,
+					      struct radeon_ps *old_ps)
+{
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_ps *old_state = rs780_get_ps(old_ps);
+
+	if ((new_state->sclk_high == old_state->sclk_high) &&
+	    (new_state->sclk_low == old_state->sclk_low))
+		return;
+
+	if (new_state->sclk_high == new_state->sclk_low)
+		return;
+
+	rs780_clk_scaling_enable(rdev, true);
+}
+
+static u32 rs780_get_voltage_for_vddc_level(struct radeon_device *rdev,
+					    enum rs780_vddc_level vddc)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
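+	/* unknown levels fall back to the max voltage */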
+	if (vddc == RS780_VDDC_LEVEL_HIGH)
+		return pi->max_voltage;
+	else if (vddc == RS780_VDDC_LEVEL_LOW)
+		return pi->min_voltage;
+	else
+		return pi->max_voltage;
+}
+
+static void rs780_enable_voltage_scaling(struct radeon_device *rdev,
+					 struct radeon_ps *new_ps)
+{
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	enum rs780_vddc_level vddc_high, vddc_low;
+
+	udelay(100);
+
+	if ((new_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
+	    (new_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
+		return;
+
+	vddc_high = rs780_get_voltage_for_vddc_level(rdev,
+						     new_state->max_voltage);
+	vddc_low = rs780_get_voltage_for_vddc_level(rdev,
+						    new_state->min_voltage);
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+	udelay(1);
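+	/* variable-voltage states track the pwm feedback range; fixed-voltage
+	 * states force the starting pwm high-time instead
+	 */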
+	if (vddc_high > vddc_low) {
+		WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
+			 RANGE_PWM_FEEDBACK_DIV_EN, ~RANGE_PWM_FEEDBACK_DIV_EN);
+
+		WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~FORCE_STARTING_PWM_HIGHTIME);
+	} else if (vddc_high == vddc_low) {
+		if (pi->max_voltage != vddc_high) {
+			WREG32_P(FVTHROT_PWM_CTRL_REG0,
+				 STARTING_PWM_HIGHTIME(vddc_high),
+				 ~STARTING_PWM_HIGHTIME_MASK);
+
+			WREG32_P(FVTHROT_PWM_CTRL_REG0,
+				 FORCE_STARTING_PWM_HIGHTIME,
+				 ~FORCE_STARTING_PWM_HIGHTIME);
+		}
+	}
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
+static void rs780_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+						     struct radeon_ps *new_ps,
+						     struct radeon_ps *old_ps)
+{
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_ps *current_state = rs780_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
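+	/* reprogram uvd clocks before the sclk change only when the engine clock is dropping */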
+	if (new_state->sclk_high >= current_state->sclk_high)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+static void rs780_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+						    struct radeon_ps *new_ps,
+						    struct radeon_ps *old_ps)
+{
+	struct igp_ps *new_state = rs780_get_ps(new_ps);
+	struct igp_ps *current_state = rs780_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->sclk_high < current_state->sclk_high)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+int rs780_dpm_enable(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	rs780_get_pm_mode_parameters(rdev);
+	rs780_disable_vbios_powersaving(rdev);
+
+	if (r600_dynamicpm_enabled(rdev))
+		return -EINVAL;
+	ret = rs780_initialize_dpm_parameters(rdev, boot_ps);
+	if (ret)
+		return ret;
+	rs780_start_dpm(rdev);
+
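+	/* with dynamic pm running, program the clock/voltage throttling blocks */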
+	rs780_preset_ranges_slow_clk_fbdiv_en(rdev);
+	rs780_preset_starting_fbdiv(rdev);
+	if (pi->voltage_control)
+		rs780_voltage_scaling_init(rdev);
+	rs780_clk_scaling_enable(rdev, true);
+	rs780_set_engine_clock_sc(rdev);
+	rs780_set_engine_clock_wfc(rdev);
+	rs780_program_at(rdev);
+	rs780_set_engine_clock_tdc(rdev);
+	rs780_set_engine_clock_ssc(rdev);
+
+	if (pi->gfx_clock_gating)
+		r600_gfx_clockgating_enable(rdev, true);
+
+	return 0;
+}
+
+void rs780_dpm_disable(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
+	r600_dynamicpm_enable(rdev, false);
+
+	rs780_clk_scaling_enable(rdev, false);
+	rs780_voltage_scaling_enable(rdev, false);
+
+	if (pi->gfx_clock_gating)
+		r600_gfx_clockgating_enable(rdev, false);
+
+	if (rdev->irq.installed &&
+	    (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+}
+
+int rs780_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
+	int ret;
+
+	rs780_get_pm_mode_parameters(rdev);
+
+	rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+
+	if (pi->voltage_control) {
+		rs780_force_voltage(rdev, pi->max_voltage);
+		mdelay(5);
+	}
+
+	ret = rs780_set_engine_clock_scaling(rdev, new_ps, old_ps);
+	if (ret)
+		return ret;
+	rs780_set_engine_clock_spc(rdev, new_ps, old_ps);
+
+	rs780_activate_engine_clk_scaling(rdev, new_ps, old_ps);
+
+	if (pi->voltage_control)
+		rs780_enable_voltage_scaling(rdev, new_ps);
+
+	rs780_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+void rs780_dpm_setup_asic(struct radeon_device *rdev)
+{
+}
+
+void rs780_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	rs780_get_pm_mode_parameters(rdev);
+	rs780_program_at(rdev);
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+};
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					     struct radeon_ps *rps,
+					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					     u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
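+	/* uvd states need valid vclk/dclk; fall back to the defaults if the table omits them */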
+	if (r600_is_uvd_state(rps->class, rps->class2)) {
+		if ((rps->vclk == 0) || (rps->dclk == 0)) {
+			rps->vclk = RS780_DEFAULT_VCLK_FREQ;
+			rps->dclk = RS780_DEFAULT_DCLK_FREQ;
+		}
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void rs780_parse_pplib_clock_info(struct radeon_device *rdev,
+					 struct radeon_ps *rps,
+					 union pplib_clock_info *clock_info)
+{
+	struct igp_ps *ps = rs780_get_ps(rps);
+	u32 sclk;
+
+	sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+	sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+	ps->sclk_low = sclk;
+	sclk = le16_to_cpu(clock_info->rs780.usHighEngineClockLow);
+	sclk |= clock_info->rs780.ucHighEngineClockHigh << 16;
+	ps->sclk_high = sclk;
+	switch (le16_to_cpu(clock_info->rs780.usVDDC)) {
+	case ATOM_PPLIB_RS780_VOLTAGE_NONE:
+	default:
+		ps->min_voltage = RS780_VDDC_LEVEL_UNKNOWN;
+		ps->max_voltage = RS780_VDDC_LEVEL_UNKNOWN;
+		break;
+	case ATOM_PPLIB_RS780_VOLTAGE_LOW:
+		ps->min_voltage = RS780_VDDC_LEVEL_LOW;
+		ps->max_voltage = RS780_VDDC_LEVEL_LOW;
+		break;
+	case ATOM_PPLIB_RS780_VOLTAGE_HIGH:
+		ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
+		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
+		break;
+	case ATOM_PPLIB_RS780_VOLTAGE_VARIABLE:
+		ps->min_voltage = RS780_VDDC_LEVEL_LOW;
+		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
+		break;
+	}
+	ps->flags = le32_to_cpu(clock_info->rs780.ulFlags);
+
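+	/* pin the boot state to the default sclk and the high voltage level */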
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		ps->sclk_low = rdev->clock.default_sclk;
+		ps->sclk_high = rdev->clock.default_sclk;
+		ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
+		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
+	}
+}
+
+static int rs780_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i;
+	union pplib_clock_info *clock_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	struct igp_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+
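+	/* walk the state array; each state indexes into the non-clock and clock info arrays */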
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			clock_info = (union pplib_clock_info *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+				 (power_state->v1.ucClockStateIndices[0] *
+				  power_info->pplib.ucClockInfoSize));
+			ps = kzalloc(sizeof(struct igp_ps), GFP_KERNEL);
+			if (ps == NULL) {
+				/* free the states allocated so far before bailing out */
+				while (--i >= 0)
+					kfree(rdev->pm.dpm.ps[i].ps_priv);
+				kfree(rdev->pm.dpm.ps);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.ps[i].ps_priv = ps;
+			rs780_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+							 non_clock_info,
+							 power_info->pplib.ucNonClockSize);
+			rs780_parse_pplib_clock_info(rdev,
+						     &rdev->pm.dpm.ps[i],
+						     clock_info);
+		}
+	}
+	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
+	return 0;
+}
+
+int rs780_dpm_init(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *info;
+	u16 data_offset;
+	u8 frev, crev;
+	int ret;
+
+	pi = kzalloc(sizeof(struct igp_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = rs780_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	pi->voltage_control = false;
+	pi->gfx_clock_gating = true;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+		/* Get various system information from the BIOS */
+		switch (crev) {
+		case 1:
+			pi->num_of_cycles_in_period =
+				info->info.ucNumberOfCyclesInPeriod;
+			pi->num_of_cycles_in_period |=
+				info->info.ucNumberOfCyclesInPeriodHi << 8;
+			pi->invert_pwm_required =
+				(pi->num_of_cycles_in_period & 0x8000) ? true : false;
+			pi->boot_voltage = info->info.ucStartingPWM_HighTime;
+			pi->max_voltage = info->info.ucMaxNBVoltage;
+			pi->max_voltage |= info->info.ucMaxNBVoltageHigh << 8;
+			pi->min_voltage = info->info.ucMinNBVoltage;
+			pi->min_voltage |= info->info.ucMinNBVoltageHigh << 8;
+			pi->inter_voltage_low =
+				le16_to_cpu(info->info.usInterNBVoltageLow);
+			pi->inter_voltage_high =
+				le16_to_cpu(info->info.usInterNBVoltageHigh);
+			pi->voltage_control = true;
+			pi->bootup_uma_clk = info->info.usK8MemoryClock * 100;
+			break;
+		case 2:
+			pi->num_of_cycles_in_period =
+				le16_to_cpu(info->info_2.usNumberOfCyclesInPeriod);
+			pi->invert_pwm_required =
+				(pi->num_of_cycles_in_period & 0x8000) ? true : false;
+			pi->boot_voltage =
+				le16_to_cpu(info->info_2.usBootUpNBVoltage);
+			pi->max_voltage =
+				le16_to_cpu(info->info_2.usMaxNBVoltage);
+			pi->min_voltage =
+				le16_to_cpu(info->info_2.usMinNBVoltage);
+			pi->system_config =
+				le32_to_cpu(info->info_2.ulSystemConfig);
+			pi->pwm_voltage_control =
+				(pi->system_config & 0x4) ? true : false;
+			pi->voltage_control = true;
+			pi->bootup_uma_clk = le32_to_cpu(info->info_2.ulBootUpUMAClock);
+			break;
+		default:
+			DRM_ERROR("No integrated system info for your GPU\n");
+			radeon_dpm_fini(rdev);
+			return -EINVAL;
+		}
+		if (pi->min_voltage > pi->max_voltage)
+			pi->voltage_control = false;
+		if (pi->pwm_voltage_control) {
+			if ((pi->num_of_cycles_in_period == 0) ||
+			    (pi->max_voltage == 0) ||
+			    (pi->min_voltage == 0))
+				pi->voltage_control = false;
+		} else {
+			if ((pi->num_of_cycles_in_period == 0) ||
+			    (pi->max_voltage == 0))
+				pi->voltage_control = false;
+		}
+
+		return 0;
+	}
+	radeon_dpm_fini(rdev);
+	return -EINVAL;
+}
+
+void rs780_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct igp_ps *ps = rs780_get_ps(rps);
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	printk("\t\tpower level 0    sclk: %u vddc_index: %d\n",
+	       ps->sclk_low, ps->min_voltage);
+	printk("\t\tpower level 1    sclk: %u vddc_index: %d\n",
+	       ps->sclk_high, ps->max_voltage);
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void rs780_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+}
+
+u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct igp_ps *requested_state = rs780_get_ps(rdev->pm.dpm.requested_ps);
+
+	if (low)
+		return requested_state->sclk_low;
+	else
+		return requested_state->sclk_high;
+}
+
+u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
+	return pi->bootup_uma_clk;
+}
+
+void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct igp_ps *ps = rs780_get_ps(rps);
+	u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
+	u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
+	u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
+		((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
+	u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
+		(post_div * ref_div);
+
+	seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+
+	/* guess based on the current sclk */
+	if (sclk < (ps->sclk_low + 500))
+		seq_printf(m, "power level 0    sclk: %u vddc_index: %d\n",
+			   ps->sclk_low, ps->min_voltage);
+	else
+		seq_printf(m, "power level 1    sclk: %u vddc_index: %d\n",
+			   ps->sclk_high, ps->max_voltage);
+}
+
+/* get the current sclk in 10 khz units */
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
+	u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
+	u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
+		((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
+	u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
+		(post_div * ref_div);
+
+	return sclk;
+}
+
+/* get the current mclk in 10 khz units */
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+
+	return pi->bootup_uma_clk;
+}
+
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct igp_ps *ps = rs780_get_ps(rps);
+	struct atom_clock_dividers dividers;
+	int ret;
+
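+	/* stop automatic scaling before forcing a level; the auto path re-enables it below */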
+	rs780_clk_scaling_enable(rdev, false);
+	rs780_voltage_scaling_enable(rdev, false);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->max_voltage);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     ps->sclk_high, false, &dividers);
+		if (ret)
+			return ret;
+
+		rs780_force_fbdiv(rdev, dividers.fb_div);
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     ps->sclk_low, false, &dividers);
+		if (ret)
+			return ret;
+
+		rs780_force_fbdiv(rdev, dividers.fb_div);
+
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->min_voltage);
+	} else {
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->max_voltage);
+
+		if (ps->sclk_high != ps->sclk_low) {
+			WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+			rs780_clk_scaling_enable(rdev, true);
+		}
+
+		if (pi->voltage_control) {
+			rs780_voltage_scaling_enable(rdev, true);
+			rs780_enable_voltage_scaling(rdev, rps);
+		}
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.h b/drivers/gpu/drm/radeon/rs780_dpm.h
new file mode 100644
index 0000000..47a40b1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RS780_DPM_H__
+#define __RS780_DPM_H__
+
+enum rs780_vddc_level {
+	RS780_VDDC_LEVEL_UNKNOWN = 0,
+	RS780_VDDC_LEVEL_LOW = 1,
+	RS780_VDDC_LEVEL_HIGH = 2,
+};
+
+struct igp_power_info {
+	/* flags */
+	bool invert_pwm_required;
+	bool pwm_voltage_control;
+	bool voltage_control;
+	bool gfx_clock_gating;
+	/* stored values */
+	u32 system_config;
+	u32 bootup_uma_clk;
+	u16 max_voltage;
+	u16 min_voltage;
+	u16 boot_voltage;
+	u16 inter_voltage_low;
+	u16 inter_voltage_high;
+	u16 num_of_cycles_in_period;
+	/* variable */
+	int crtc_id;
+	int refresh_rate;
+};
+
+struct igp_ps {
+	enum rs780_vddc_level min_voltage;
+	enum rs780_vddc_level max_voltage;
+	u32 sclk_low;
+	u32 sclk_high;
+	u32 flags;
+};
+
+#define RS780_CGFTV_DFLT                 0x0303000f
+#define RS780_FBDIVTIMERVAL_DFLT         0x2710
+
+#define RS780_FVTHROTUTC0_DFLT   0x04010040
+#define RS780_FVTHROTUTC1_DFLT   0x04010040
+#define RS780_FVTHROTUTC2_DFLT   0x04010040
+#define RS780_FVTHROTUTC3_DFLT   0x04010040
+#define RS780_FVTHROTUTC4_DFLT   0x04010040
+
+#define RS780_FVTHROTDTC0_DFLT 0x04010040
+#define RS780_FVTHROTDTC1_DFLT 0x04010040
+#define RS780_FVTHROTDTC2_DFLT 0x04010040
+#define RS780_FVTHROTDTC3_DFLT 0x04010040
+#define RS780_FVTHROTDTC4_DFLT 0x04010040
+
+#define RS780_FVTHROTFBUSREG0_DFLT       0x00001001
+#define RS780_FVTHROTFBUSREG1_DFLT       0x00002002
+#define RS780_FVTHROTFBDSREG0_DFLT       0x00004001
+#define RS780_FVTHROTFBDSREG1_DFLT       0x00020010
+
+#define RS780_FVTHROTPWMUSREG0_DFLT      0x00002001
+#define RS780_FVTHROTPWMUSREG1_DFLT      0x00004003
+#define RS780_FVTHROTPWMDSREG0_DFLT      0x00002001
+#define RS780_FVTHROTPWMDSREG1_DFLT      0x00004003
+
+#define RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT  0x37
+#define RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT  0x4b
+#define RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT  0x8b
+
+#define RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT  0x8b
+#define RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT  0x8c
+#define RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT  0xb5
+
+#define RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT  0x8d
+#define RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT  0x8e
+#define RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT  0xba
+
+#define RS780_FVTHROTPWMRANGE0_GPIO_DFLT  0x1a
+#define RS780_FVTHROTPWMRANGE1_GPIO_DFLT  0x1a
+#define RS780_FVTHROTPWMRANGE2_GPIO_DFLT  0x0
+#define RS780_FVTHROTPWMRANGE3_GPIO_DFLT  0x0
+
+#define RS780_SLOWCLKFEEDBACKDIV_DFLT 110
+
+#define RS780_CGCLKGATING_DFLT           0x0000E204
+
+#define RS780_DEFAULT_VCLK_FREQ  53300 /* 10 khz */
+#define RS780_DEFAULT_DCLK_FREQ  40000 /* 10 khz */
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
new file mode 100644
index 0000000..cfbe9a4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RS780D_H__
+#define __RS780D_H__
+
+#define CG_SPLL_FUNC_CNTL                                 0x600
+#       define SPLL_RESET                                (1 << 0)
+#       define SPLL_SLEEP                                (1 << 1)
+#       define SPLL_REF_DIV(x)                           ((x) << 2)
+#       define SPLL_REF_DIV_MASK                         (7 << 2)
+#       define SPLL_REF_DIV_SHIFT                        2
+#       define SPLL_FB_DIV(x)                            ((x) << 5)
+#       define SPLL_FB_DIV_MASK                          (0xff << 2)
+#       define SPLL_FB_DIV_SHIFT                         2
+#       define SPLL_PULSEEN                              (1 << 13)
+#       define SPLL_PULSENUM(x)                          ((x) << 14)
+#       define SPLL_PULSENUM_MASK                        (3 << 14)
+#       define SPLL_SW_HILEN(x)                          ((x) << 16)
+#       define SPLL_SW_HILEN_MASK                        (0xf << 16)
+#       define SPLL_SW_HILEN_SHIFT                       16
+#       define SPLL_SW_LOLEN(x)                          ((x) << 20)
+#       define SPLL_SW_LOLEN_MASK                        (0xf << 20)
+#       define SPLL_SW_LOLEN_SHIFT                       20
+#       define SPLL_DIVEN                                (1 << 24)
+#       define SPLL_BYPASS_EN                            (1 << 25)
+#       define SPLL_CHG_STATUS                           (1 << 29)
+#       define SPLL_CTLREQ                               (1 << 30)
+#       define SPLL_CTLACK                               (1 << 31)
+
+/* RS780/RS880 PM */
+#define	FVTHROT_CNTRL_REG				0x3000
+#define		DONT_WAIT_FOR_FBDIV_WRAP		(1 << 0)
+#define		MINIMUM_CIP(x)				((x) << 1)
+#define		MINIMUM_CIP_SHIFT			1
+#define		MINIMUM_CIP_MASK			0x1fffffe
+#define		REFRESH_RATE_DIVISOR(x)			((x) << 25)
+#define		REFRESH_RATE_DIVISOR_SHIFT		25
+#define		REFRESH_RATE_DIVISOR_MASK		(0x3 << 25)
+#define		ENABLE_FV_THROT				(1 << 27)
+#define		ENABLE_FV_UPDATE			(1 << 28)
+#define		TREND_SEL_MODE				(1 << 29)
+#define		FORCE_TREND_SEL				(1 << 30)
+#define		ENABLE_FV_THROT_IO			(1 << 31)
+#define	FVTHROT_TARGET_REG				0x3004
+#define		TARGET_IDLE_COUNT(x)			((x) << 0)
+#define		TARGET_IDLE_COUNT_MASK			0xffffff
+#define		TARGET_IDLE_COUNT_SHIFT			0
+#define	FVTHROT_CB1					0x3008
+#define	FVTHROT_CB2					0x300c
+#define	FVTHROT_CB3					0x3010
+#define	FVTHROT_CB4					0x3014
+#define	FVTHROT_UTC0					0x3018
+#define	FVTHROT_UTC1					0x301c
+#define	FVTHROT_UTC2					0x3020
+#define	FVTHROT_UTC3					0x3024
+#define	FVTHROT_UTC4					0x3028
+#define	FVTHROT_DTC0					0x302c
+#define	FVTHROT_DTC1					0x3030
+#define	FVTHROT_DTC2					0x3034
+#define	FVTHROT_DTC3					0x3038
+#define	FVTHROT_DTC4					0x303c
+#define	FVTHROT_FBDIV_REG0				0x3040
+#define		MIN_FEEDBACK_DIV(x)			((x) << 0)
+#define		MIN_FEEDBACK_DIV_MASK			0xfff
+#define		MIN_FEEDBACK_DIV_SHIFT			0
+#define		MAX_FEEDBACK_DIV(x)			((x) << 12)
+#define		MAX_FEEDBACK_DIV_MASK			(0xfff << 12)
+#define		MAX_FEEDBACK_DIV_SHIFT			12
+#define	FVTHROT_FBDIV_REG1				0x3044
+#define		MAX_FEEDBACK_STEP(x)			((x) << 0)
+#define		MAX_FEEDBACK_STEP_MASK			0xfff
+#define		MAX_FEEDBACK_STEP_SHIFT			0
+#define		STARTING_FEEDBACK_DIV(x)		((x) << 12)
+#define		STARTING_FEEDBACK_DIV_MASK		(0xfff << 12)
+#define		STARTING_FEEDBACK_DIV_SHIFT		12
+#define		FORCE_FEEDBACK_DIV			(1 << 24)
+#define	FVTHROT_FBDIV_REG2				0x3048
+#define		FORCED_FEEDBACK_DIV(x)			((x) << 0)
+#define		FORCED_FEEDBACK_DIV_MASK		0xfff
+#define		FORCED_FEEDBACK_DIV_SHIFT		0
+#define		FB_DIV_TIMER_VAL(x)			((x) << 12)
+#define		FB_DIV_TIMER_VAL_MASK			(0xffff << 12)
+#define		FB_DIV_TIMER_VAL_SHIFT			12
+#define	FVTHROT_FB_US_REG0				0x304c
+#define	FVTHROT_FB_US_REG1				0x3050
+#define	FVTHROT_FB_DS_REG0				0x3054
+#define	FVTHROT_FB_DS_REG1				0x3058
+#define	FVTHROT_PWM_CTRL_REG0				0x305c
+#define		STARTING_PWM_HIGHTIME(x)		((x) << 0)
+#define		STARTING_PWM_HIGHTIME_MASK		0xfff
+#define		STARTING_PWM_HIGHTIME_SHIFT		0
+#define		NUMBER_OF_CYCLES_IN_PERIOD(x)		((x) << 12)
+#define		NUMBER_OF_CYCLES_IN_PERIOD_MASK		(0xfff << 12)
+#define		NUMBER_OF_CYCLES_IN_PERIOD_SHIFT	12
+#define		FORCE_STARTING_PWM_HIGHTIME		(1 << 24)
+#define		INVERT_PWM_WAVEFORM			(1 << 25)
+#define	FVTHROT_PWM_CTRL_REG1				0x3060
+#define		MIN_PWM_HIGHTIME(x)			((x) << 0)
+#define		MIN_PWM_HIGHTIME_MASK			0xfff
+#define		MIN_PWM_HIGHTIME_SHIFT			0
+#define		MAX_PWM_HIGHTIME(x)			((x) << 12)
+#define		MAX_PWM_HIGHTIME_MASK			(0xfff << 12)
+#define		MAX_PWM_HIGHTIME_SHIFT			12
+#define	FVTHROT_PWM_US_REG0				0x3064
+#define	FVTHROT_PWM_US_REG1				0x3068
+#define	FVTHROT_PWM_DS_REG0				0x306c
+#define	FVTHROT_PWM_DS_REG1				0x3070
+#define	FVTHROT_STATUS_REG0				0x3074
+#define		CURRENT_FEEDBACK_DIV_MASK		0xfff
+#define		CURRENT_FEEDBACK_DIV_SHIFT		0
+#define	FVTHROT_STATUS_REG1				0x3078
+#define	FVTHROT_STATUS_REG2				0x307c
+#define	CG_INTGFX_MISC					0x3080
+#define		FVTHROT_VBLANK_SEL			(1 << 9)
+#define	FVTHROT_PWM_FEEDBACK_DIV_REG1			0x308c
+#define		RANGE0_PWM_FEEDBACK_DIV(x)		((x) << 0)
+#define		RANGE0_PWM_FEEDBACK_DIV_MASK		0xfff
+#define		RANGE0_PWM_FEEDBACK_DIV_SHIFT		0
+#define		RANGE_PWM_FEEDBACK_DIV_EN		(1 << 12)
+#define	FVTHROT_PWM_FEEDBACK_DIV_REG2			0x3090
+#define		RANGE1_PWM_FEEDBACK_DIV(x)		((x) << 0)
+#define		RANGE1_PWM_FEEDBACK_DIV_MASK		0xfff
+#define		RANGE1_PWM_FEEDBACK_DIV_SHIFT		0
+#define		RANGE2_PWM_FEEDBACK_DIV(x)		((x) << 12)
+#define		RANGE2_PWM_FEEDBACK_DIV_MASK		(0xfff << 12)
+#define		RANGE2_PWM_FEEDBACK_DIV_SHIFT		12
+#define	FVTHROT_PWM_FEEDBACK_DIV_REG3			0x3094
+#define		RANGE0_PWM(x)				((x) << 0)
+#define		RANGE0_PWM_MASK				0xfff
+#define		RANGE0_PWM_SHIFT			0
+#define		RANGE1_PWM(x)				((x) << 12)
+#define		RANGE1_PWM_MASK				(0xfff << 12)
+#define		RANGE1_PWM_SHIFT			12
+#define	FVTHROT_PWM_FEEDBACK_DIV_REG4			0x3098
+#define		RANGE2_PWM(x)				((x) << 0)
+#define		RANGE2_PWM_MASK				0xfff
+#define		RANGE2_PWM_SHIFT			0
+#define		RANGE3_PWM(x)				((x) << 12)
+#define		RANGE3_PWM_MASK				(0xfff << 12)
+#define		RANGE3_PWM_SHIFT			12
+#define	FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1		0x30ac
+#define		RANGE0_SLOW_CLK_FEEDBACK_DIV(x)		((x) << 0)
+#define		RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK	0xfff
+#define		RANGE0_SLOW_CLK_FEEDBACK_DIV_SHIFT	0
+#define		RANGE_SLOW_CLK_FEEDBACK_DIV_EN		(1 << 12)
+
+#define	GFX_MACRO_BYPASS_CNTL				0x30c0
+#define		SPLL_BYPASS_CNTL			(1 << 0)
+#define		UPLL_BYPASS_CNTL			(1 << 1)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 0000000..c5b3983
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 0000000..e5a70b0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#define R_00000D_SCLK_CNTL_M6                        0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 0000000..c75c5ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL                           0x00000D */
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 0000000..ffbd2c0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,1303 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "rv515d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rv515_reg_safe.h"
+
+/* This file gathers functions specific to rv515 */
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+void rv515_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (rv515_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+	if (rv515_debugfs_ga_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for GA !\n");
+	}
+}
+
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  ISYNC_ANY2D_IDLE3D |
+			  ISYNC_ANY3D_IDLE2D |
+			  ISYNC_WAIT_IDLEGUI |
+			  ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X0_SHIFT) |
+			   (6 << MS_Y0_SHIFT) |
+			   (6 << MS_X1_SHIFT) |
+			   (6 << MS_Y1_SHIFT) |
+			   (6 << MS_X2_SHIFT) |
+			   (6 << MS_Y2_SHIFT) |
+			   (6 << MSBD0_Y_SHIFT) |
+			   (6 << MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X3_SHIFT) |
+			   (6 << MS_Y3_SHIFT) |
+			   (6 << MS_X4_SHIFT) |
+			   (6 << MS_Y4_SHIFT) |
+			   (6 << MS_X5_SHIFT) |
+			   (6 << MS_Y5_SHIFT) |
+			   (6 << MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+	radeon_ring_write(ring, PACKET0(0x20C8, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring, false);
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(MC_STATUS);
+		if (tmp & MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+void rv515_vga_render_disable(struct radeon_device *rdev)
+{
+	WREG32(R_000300_VGA_RENDER_CONTROL,
+		RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void rv515_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
+	}
+	rv515_vga_render_disable(rdev);
+	r420_pipes_init(rdev);
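+	/* Build the pipe gating mask: one bit for the currently selected
+	 * pipe (DST_PIPE_CONFIG[3:2]) plus the pipe mask reported in
+	 * GB_PIPE_SELECT[11:8], written to PLL register 0x0D (presumably
+	 * the per-pipe SCLK gating control).
+	 */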
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
+	}
+	if (rv515_mc_wait_for_idle(rdev)) {
+		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0:
+		rdev->mc.vram_width = 64;
+		break;
+	case 1:
+		rdev->mc.vram_width = 128;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
+}
+
+static void rv515_mc_init(struct radeon_device *rdev)
+{
+	rv515_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
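+/* MC registers are reached through the MC_IND_INDEX/MC_IND_DATA window
+ * under mc_idx_lock. The write path sets an extra enable bit in the
+ * index (0xff0000 vs 0x7f0000 on reads), which appears to be the write
+ * enable for this window.
+ */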
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+	r = RREG32(MC_IND_DATA);
+	WREG32(MC_IND_INDEX, 0);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
+	return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+	WREG32(MC_IND_DATA, (v));
+	WREG32(MC_IND_INDEX, 0);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(SU_REG_DEST);
+	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+	tmp = RREG32(GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(0x2140);
+	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+	radeon_asic_reset(rdev);
+	tmp = RREG32(0x425C);
+	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
+	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+				radeon_wait_for_vblank(rdev, i);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_EN;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+			/* ***** */
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	if (rdev->family >= CHIP_R600) {
+		if (rdev->family >= CHIP_RV770)
+			blackout = RREG32(R700_MC_CITF_CNTL);
+		else
+			blackout = RREG32(R600_CITF_CNTL);
+		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+			/* Block CPU access */
+			WREG32(R600_BIF_FB_EN, 0);
+			/* blackout the MC */
+			blackout |= R600_BLACKOUT_MASK;
+			if (rdev->family >= CHIP_RV770)
+				WREG32(R700_MC_CITF_CNTL, blackout);
+			else
+				WREG32(R600_CITF_CNTL, blackout);
+		}
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->family >= CHIP_RV770) {
+			if (i == 0) {
+				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			} else {
+				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			}
+		}
+		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		/* unblackout the MC */
+		if (rdev->family >= CHIP_RV770)
+			tmp = RREG32(R700_MC_CITF_CNTL);
+		else
+			tmp = RREG32(R600_CITF_CNTL);
+		tmp &= ~R600_BLACKOUT_MASK;
+		if (rdev->family >= CHIP_RV770)
+			WREG32(R700_MC_CITF_CNTL, tmp);
+		else
+			WREG32(R600_CITF_CNTL, tmp);
+		/* allow CPU access */
+		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
+	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void rv515_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rv515_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program MC; it should be a 32-bit limited address space */
+	WREG32_MC(R_000001_MC_FB_LOCATION,
+			S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000002_MC_AGP_LOCATION,
+			S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000004_MC_AGP_BASE_2,
+			S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000003_MC_AGP_BASE, 0);
+		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
+
+void rv515_clock_startup(struct radeon_device *rdev)
+{
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	/* We need to force on some of the blocks */
+	WREG32_PLL(R_00000F_CP_DYN_CNTL,
+		RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
+	WREG32_PLL(R_000011_E2_DYN_CNTL,
+		RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
+	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
+		RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
+}
+
+static int rv515_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rv515_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rv515_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rv515_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not working */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rv515_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	return 0;
+}
+
+void rv515_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
+}
+
+void rv515_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rv370_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rv515_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA; need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	rv515_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
+{
+	int index_reg = 0x6578 + crtc->crtc_offset;
+	int data_reg = 0x657c + crtc->crtc_offset;
+
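+	/* The writes below program the TV scaler through an indexed
+	 * index/data register pair; the table appears to be a fixed set
+	 * of scaler filter coefficients keyed by phase.
+	 */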
+	WREG32(0x659C + crtc->crtc_offset, 0x0);
+	WREG32(0x6594 + crtc->crtc_offset, 0x705);
+	WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
+	WREG32(0x65D8 + crtc->crtc_offset, 0x0);
+	WREG32(0x65B0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65C0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65D4 + crtc->crtc_offset, 0x0);
+	WREG32(index_reg, 0x0);
+	WREG32(data_reg, 0x841880A8);
+	WREG32(index_reg, 0x1);
+	WREG32(data_reg, 0x84208680);
+	WREG32(index_reg, 0x2);
+	WREG32(data_reg, 0xBFF880B0);
+	WREG32(index_reg, 0x100);
+	WREG32(data_reg, 0x83D88088);
+	WREG32(index_reg, 0x101);
+	WREG32(data_reg, 0x84608680);
+	WREG32(index_reg, 0x102);
+	WREG32(data_reg, 0xBFF080D0);
+	WREG32(index_reg, 0x200);
+	WREG32(data_reg, 0x83988068);
+	WREG32(index_reg, 0x201);
+	WREG32(data_reg, 0x84A08680);
+	WREG32(index_reg, 0x202);
+	WREG32(data_reg, 0xBFF080F8);
+	WREG32(index_reg, 0x300);
+	WREG32(data_reg, 0x83588058);
+	WREG32(index_reg, 0x301);
+	WREG32(data_reg, 0x84E08660);
+	WREG32(index_reg, 0x302);
+	WREG32(data_reg, 0xBFF88120);
+	WREG32(index_reg, 0x400);
+	WREG32(data_reg, 0x83188040);
+	WREG32(index_reg, 0x401);
+	WREG32(data_reg, 0x85008660);
+	WREG32(index_reg, 0x402);
+	WREG32(data_reg, 0xBFF88150);
+	WREG32(index_reg, 0x500);
+	WREG32(data_reg, 0x82D88030);
+	WREG32(index_reg, 0x501);
+	WREG32(data_reg, 0x85408640);
+	WREG32(index_reg, 0x502);
+	WREG32(data_reg, 0xBFF88180);
+	WREG32(index_reg, 0x600);
+	WREG32(data_reg, 0x82A08018);
+	WREG32(index_reg, 0x601);
+	WREG32(data_reg, 0x85808620);
+	WREG32(index_reg, 0x602);
+	WREG32(data_reg, 0xBFF081B8);
+	WREG32(index_reg, 0x700);
+	WREG32(data_reg, 0x82608010);
+	WREG32(index_reg, 0x701);
+	WREG32(data_reg, 0x85A08600);
+	WREG32(index_reg, 0x702);
+	WREG32(data_reg, 0x800081F0);
+	WREG32(index_reg, 0x800);
+	WREG32(data_reg, 0x8228BFF8);
+	WREG32(index_reg, 0x801);
+	WREG32(data_reg, 0x85E085E0);
+	WREG32(index_reg, 0x802);
+	WREG32(data_reg, 0xBFF88228);
+	WREG32(index_reg, 0x10000);
+	WREG32(data_reg, 0x82A8BF00);
+	WREG32(index_reg, 0x10001);
+	WREG32(data_reg, 0x82A08CC0);
+	WREG32(index_reg, 0x10002);
+	WREG32(data_reg, 0x8008BEF8);
+	WREG32(index_reg, 0x10100);
+	WREG32(data_reg, 0x81F0BF28);
+	WREG32(index_reg, 0x10101);
+	WREG32(data_reg, 0x83608CA0);
+	WREG32(index_reg, 0x10102);
+	WREG32(data_reg, 0x8018BED0);
+	WREG32(index_reg, 0x10200);
+	WREG32(data_reg, 0x8148BF38);
+	WREG32(index_reg, 0x10201);
+	WREG32(data_reg, 0x84408C80);
+	WREG32(index_reg, 0x10202);
+	WREG32(data_reg, 0x8008BEB8);
+	WREG32(index_reg, 0x10300);
+	WREG32(data_reg, 0x80B0BF78);
+	WREG32(index_reg, 0x10301);
+	WREG32(data_reg, 0x85008C20);
+	WREG32(index_reg, 0x10302);
+	WREG32(data_reg, 0x8020BEA0);
+	WREG32(index_reg, 0x10400);
+	WREG32(data_reg, 0x8028BF90);
+	WREG32(index_reg, 0x10401);
+	WREG32(data_reg, 0x85E08BC0);
+	WREG32(index_reg, 0x10402);
+	WREG32(data_reg, 0x8018BE90);
+	WREG32(index_reg, 0x10500);
+	WREG32(data_reg, 0xBFB8BFB0);
+	WREG32(index_reg, 0x10501);
+	WREG32(data_reg, 0x86C08B40);
+	WREG32(index_reg, 0x10502);
+	WREG32(data_reg, 0x8010BE90);
+	WREG32(index_reg, 0x10600);
+	WREG32(data_reg, 0xBF58BFC8);
+	WREG32(index_reg, 0x10601);
+	WREG32(data_reg, 0x87A08AA0);
+	WREG32(index_reg, 0x10602);
+	WREG32(data_reg, 0x8010BE98);
+	WREG32(index_reg, 0x10700);
+	WREG32(data_reg, 0xBF10BFF0);
+	WREG32(index_reg, 0x10701);
+	WREG32(data_reg, 0x886089E0);
+	WREG32(index_reg, 0x10702);
+	WREG32(data_reg, 0x8018BEB0);
+	WREG32(index_reg, 0x10800);
+	WREG32(data_reg, 0xBED8BFE8);
+	WREG32(index_reg, 0x10801);
+	WREG32(data_reg, 0x89408940);
+	WREG32(index_reg, 0x10802);
+	WREG32(data_reg, 0xBFE8BED8);
+	WREG32(index_reg, 0x20000);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20001);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x20002);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20003);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20100);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20101);
+	WREG32(data_reg, 0x8FE0BF70);
+	WREG32(index_reg, 0x20102);
+	WREG32(data_reg, 0xBFE880C0);
+	WREG32(index_reg, 0x20103);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20200);
+	WREG32(data_reg, 0x8018BFF8);
+	WREG32(index_reg, 0x20201);
+	WREG32(data_reg, 0x8F80BF08);
+	WREG32(index_reg, 0x20202);
+	WREG32(data_reg, 0xBFD081A0);
+	WREG32(index_reg, 0x20203);
+	WREG32(data_reg, 0xBFF88000);
+	WREG32(index_reg, 0x20300);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20301);
+	WREG32(data_reg, 0x8EE0BEC0);
+	WREG32(index_reg, 0x20302);
+	WREG32(data_reg, 0xBFB082A0);
+	WREG32(index_reg, 0x20303);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20400);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20401);
+	WREG32(data_reg, 0x8E00BEA0);
+	WREG32(index_reg, 0x20402);
+	WREG32(data_reg, 0xBF8883C0);
+	WREG32(index_reg, 0x20403);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20500);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20501);
+	WREG32(data_reg, 0x8D00BE90);
+	WREG32(index_reg, 0x20502);
+	WREG32(data_reg, 0xBF588500);
+	WREG32(index_reg, 0x20503);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20600);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20601);
+	WREG32(data_reg, 0x8BC0BE98);
+	WREG32(index_reg, 0x20602);
+	WREG32(data_reg, 0xBF308660);
+	WREG32(index_reg, 0x20603);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20700);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20701);
+	WREG32(data_reg, 0x8A80BEB0);
+	WREG32(index_reg, 0x20702);
+	WREG32(data_reg, 0xBF0087C0);
+	WREG32(index_reg, 0x20703);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20800);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20801);
+	WREG32(data_reg, 0x8920BED0);
+	WREG32(index_reg, 0x20802);
+	WREG32(data_reg, 0xBED08920);
+	WREG32(index_reg, 0x20803);
+	WREG32(data_reg, 0x80008010);
+	WREG32(index_reg, 0x30000);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x30001);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x30100);
+	WREG32(data_reg, 0x8FE0BF90);
+	WREG32(index_reg, 0x30101);
+	WREG32(data_reg, 0xBFF880A0);
+	WREG32(index_reg, 0x30200);
+	WREG32(data_reg, 0x8F60BF40);
+	WREG32(index_reg, 0x30201);
+	WREG32(data_reg, 0xBFE88180);
+	WREG32(index_reg, 0x30300);
+	WREG32(data_reg, 0x8EC0BF00);
+	WREG32(index_reg, 0x30301);
+	WREG32(data_reg, 0xBFC88280);
+	WREG32(index_reg, 0x30400);
+	WREG32(data_reg, 0x8DE0BEE0);
+	WREG32(index_reg, 0x30401);
+	WREG32(data_reg, 0xBFA083A0);
+	WREG32(index_reg, 0x30500);
+	WREG32(data_reg, 0x8CE0BED0);
+	WREG32(index_reg, 0x30501);
+	WREG32(data_reg, 0xBF7884E0);
+	WREG32(index_reg, 0x30600);
+	WREG32(data_reg, 0x8BA0BED8);
+	WREG32(index_reg, 0x30601);
+	WREG32(data_reg, 0xBF508640);
+	WREG32(index_reg, 0x30700);
+	WREG32(data_reg, 0x8A60BEE8);
+	WREG32(index_reg, 0x30701);
+	WREG32(data_reg, 0xBF2087A0);
+	WREG32(index_reg, 0x30800);
+	WREG32(data_reg, 0x8900BF00);
+	WREG32(index_reg, 0x30801);
+	WREG32(data_reg, 0xBF008900);
+}
+
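+/* Watermark parameters for one display controller. fixed20_12 is the
+ * 20.12 fixed-point type used throughout the radeon bandwidth code
+ * (see the dfixed_* helpers).
+ */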
+struct rv515_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
+
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+					 struct radeon_crtc *crtc,
+					 struct rv515_watermark *wm,
+					 bool low)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+	fixed20_12 sclk;
+	u32 selected_sclk;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	/* rv6xx, rv7xx */
+	if ((rdev->family >= CHIP_RV610) &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		selected_sclk = radeon_dpm_get_sclk(rdev, low);
+	else
+		selected_sclk = rdev->pm.current_sclk;
+
+	/* sclk in MHz */
+	a.full = dfixed_const(100);
+	sclk.full = dfixed_const(selected_sclk);
+	sclk.full = dfixed_div(sclk, a);
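+	/* current_sclk is kept in 10 kHz units, so dividing by 100 yields MHz */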
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
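+	/* e.g. a 1280 pixel wide mode needs 1280/256 = 5 chunk requests per
+	 * line, multiplied by the number of line pairs fetched at once.
+	 */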
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
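+	/* Worked example: a 108 MHz pixel clock (mode->clock = 108000 kHz)
+	 * gives pclk = 1000/108 ~= 9.26 ns, i.e. a consumption rate of
+	 * roughly 0.108 pixels per ns before scaling adjustments.
+	 */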
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period(ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock (MHz)
+	 */
+	a.full = dfixed_const(600 * 1000);
+	chunk_time.full = dfixed_div(a, sclk);
+	read_delay_latency.full = dfixed_const(1000);
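+	/* e.g. at sclk = 400 MHz one chunk takes 600000/400 = 1500 time
+	 * units, with a constant 1000 added on top for read delay.
+	 */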
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2 + wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
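+	/* In other words, (lb_request_fifo_depth - 2) outstanding chunks can
+	 * be hidden by the FIFO; any remaining chunks of the request must
+	 * fit within the line time.
+	 */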
+	/* We assume worst case 32bits (4 bytes) */
+	wm->dbpp.full = dfixed_const(2 * 16);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 *  priority marks are expressed in units of 16 pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = wm->priority_mark_max.full;
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
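+/* Derive the MODE_PRIORITY_*_CNT watermarks for both display controllers.
+ * The sclk fill rate is split across the active heads; when a head's
+ * consumption rate exceeds its fill rate, the shortfall accumulated over
+ * the active time is added on top of the latency-based mark.
+ */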
+static void rv515_compute_mode_priority(struct radeon_device *rdev,
+					struct rv515_watermark *wm0,
+					struct rv515_watermark *wm1,
+					struct drm_display_mode *mode0,
+					struct drm_display_mode *mode1,
+					u32 *d1mode_priority_a_cnt,
+					u32 *d2mode_priority_a_cnt)
+{
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
+		else
+			a.full = wm0->num_line_pair.full;
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
+		else
+			b.full = wm1->num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
+		else
+			a.full = wm0->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0->worst_case_latency,
+						wm0->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	} else if (mode1) {
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
+		else
+			a.full = wm1->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1->sclk, a);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1->worst_case_latency,
+						wm1->consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	}
+}
+
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rv515_watermark wm0_high, wm0_low;
+	struct rv515_watermark wm1_high, wm1_low;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
+
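+	/* D1 FIFO depth lives in bits 3:0, D2 in bits 19:16 of
+	 * LB_MAX_REQ_OUTSTANDING.
+	 */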
+	tmp = wm0_high.lb_request_fifo_depth;
+	tmp |= wm1_high.lb_request_fifo_depth << 16;
+	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+
+	rv515_compute_mode_priority(rdev,
+				    &wm0_high, &wm1_high,
+				    mode0, mode1,
+				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
+	rv515_compute_mode_priority(rdev,
+				    &wm0_low, &wm1_low,
+				    mode0, mode1,
+				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
+
+	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
+	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
+}
+
+void rv515_bandwidth_update(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for the display priority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    (rdev->family == CHIP_RV515)) {
+		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
+		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
+		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
+		if (mode1)
+			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode0)
+			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32_MC(MC_MISC_LAT_TIMER, tmp);
+	}
+	rv515_bandwidth_avivo_update(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
new file mode 100644
index 0000000..6927a20
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV515D_H__
+#define __RV515D_H__
+
+/*
+ * RV515 registers
+ */
+#define PCIE_INDEX			0x0030
+#define PCIE_DATA			0x0034
+#define	MC_IND_INDEX			0x0070
+#define		MC_IND_WR_EN				(1 << 24)
+#define	MC_IND_DATA			0x0074
+#define	RBBM_SOFT_RESET			0x00F0
+#define	CONFIG_MEMSIZE			0x00F8
+#define HDP_FB_LOCATION			0x0134
+#define	CP_CSQ_CNTL			0x0740
+#define	CP_CSQ_MODE			0x0744
+#define	CP_CSQ_ADDR			0x07F0
+#define	CP_CSQ_DATA			0x07F4
+#define	CP_CSQ_STAT			0x07F8
+#define	CP_CSQ2_STAT			0x07FC
+#define	RBBM_STATUS			0x0E40
+#define	DST_PIPE_CONFIG			0x170C
+#define	WAIT_UNTIL			0x1720
+#define		WAIT_2D_IDLE				(1 << 14)
+#define		WAIT_3D_IDLE				(1 << 15)
+#define		WAIT_2D_IDLECLEAN			(1 << 16)
+#define		WAIT_3D_IDLECLEAN			(1 << 17)
+#define	ISYNC_CNTL			0x1724
+#define		ISYNC_ANY2D_IDLE3D			(1 << 0)
+#define		ISYNC_ANY3D_IDLE2D			(1 << 1)
+#define		ISYNC_TRIG2D_IDLE3D			(1 << 2)
+#define		ISYNC_TRIG3D_IDLE2D			(1 << 3)
+#define		ISYNC_WAIT_IDLEGUI			(1 << 4)
+#define		ISYNC_CPSCRATCH_IDLEGUI			(1 << 5)
+#define	VAP_INDEX_OFFSET		0x208C
+#define	VAP_PVS_STATE_FLUSH_REG		0x2284
+#define	GB_ENABLE			0x4008
+#define	GB_MSPOS0			0x4010
+#define		MS_X0_SHIFT				0
+#define		MS_Y0_SHIFT				4
+#define		MS_X1_SHIFT				8
+#define		MS_Y1_SHIFT				12
+#define		MS_X2_SHIFT				16
+#define		MS_Y2_SHIFT				20
+#define		MSBD0_Y_SHIFT				24
+#define		MSBD0_X_SHIFT				28
+#define	GB_MSPOS1			0x4014
+#define		MS_X3_SHIFT				0
+#define		MS_Y3_SHIFT				4
+#define		MS_X4_SHIFT				8
+#define		MS_Y4_SHIFT				12
+#define		MS_X5_SHIFT				16
+#define		MS_Y5_SHIFT				20
+#define		MSBD1_SHIFT				24
+#define GB_TILE_CONFIG			0x4018
+#define		ENABLE_TILING				(1 << 0)
+#define		PIPE_COUNT_MASK				0x0000000E
+#define		PIPE_COUNT_SHIFT			1
+#define		TILE_SIZE_8				(0 << 4)
+#define		TILE_SIZE_16				(1 << 4)
+#define		TILE_SIZE_32				(2 << 4)
+#define		SUBPIXEL_1_12				(0 << 16)
+#define		SUBPIXEL_1_16				(1 << 16)
+#define	GB_SELECT			0x401C
+#define	GB_AA_CONFIG			0x4020
+#define	GB_PIPE_SELECT			0x402C
+#define	GA_ENHANCE			0x4274
+#define		GA_DEADLOCK_CNTL			(1 << 0)
+#define		GA_FASTSYNC_CNTL			(1 << 1)
+#define	GA_POLY_MODE			0x4288
+#define		FRONT_PTYPE_POINT			(0 << 4)
+#define		FRONT_PTYPE_LINE			(1 << 4)
+#define		FRONT_PTYPE_TRIANGE			(2 << 4)
+#define		BACK_PTYPE_POINT			(0 << 7)
+#define		BACK_PTYPE_LINE				(1 << 7)
+#define		BACK_PTYPE_TRIANGE			(2 << 7)
+#define	GA_ROUND_MODE			0x428C
+#define		GEOMETRY_ROUND_TRUNC			(0 << 0)
+#define		GEOMETRY_ROUND_NEAREST			(1 << 0)
+#define		COLOR_ROUND_TRUNC			(0 << 2)
+#define		COLOR_ROUND_NEAREST			(1 << 2)
+#define	SU_REG_DEST			0x42C8
+#define	RB3D_DSTCACHE_CTLSTAT		0x4E4C
+#define		RB3D_DC_FLUSH				(2 << 0)
+#define		RB3D_DC_FREE				(2 << 2)
+#define		RB3D_DC_FINISH				(1 << 4)
+#define ZB_ZCACHE_CTLSTAT		0x4F18
+#define		ZC_FLUSH				(1 << 0)
+#define		ZC_FREE					(1 << 1)
+#define DC_LB_MEMORY_SPLIT		0x6520
+#define		DC_LB_MEMORY_SPLIT_MASK			0x00000003
+#define		DC_LB_MEMORY_SPLIT_SHIFT		0
+#define		DC_LB_MEMORY_SPLIT_D1HALF_D2HALF	0
+#define		DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q		1
+#define		DC_LB_MEMORY_SPLIT_D1_ONLY		2
+#define		DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q		3
+#define		DC_LB_MEMORY_SPLIT_SHIFT_MODE		(1 << 2)
+#define		DC_LB_DISP1_END_ADR_SHIFT		4
+#define		DC_LB_DISP1_END_ADR_MASK		0x00007FF0
+#define D1MODE_PRIORITY_A_CNT		0x6548
+#define		MODE_PRIORITY_MARK_MASK			0x00007FFF
+#define		MODE_PRIORITY_OFF			(1 << 16)
+#define		MODE_PRIORITY_ALWAYS_ON			(1 << 20)
+#define		MODE_PRIORITY_FORCE_MASK		(1 << 24)
+#define D1MODE_PRIORITY_B_CNT		0x654C
+#define LB_MAX_REQ_OUTSTANDING		0x6D58
+#define		LB_D1_MAX_REQ_OUTSTANDING_MASK		0x0000000F
+#define		LB_D1_MAX_REQ_OUTSTANDING_SHIFT		0
+#define		LB_D2_MAX_REQ_OUTSTANDING_MASK		0x000F0000
+#define		LB_D2_MAX_REQ_OUTSTANDING_SHIFT		16
+#define D2MODE_PRIORITY_A_CNT		0x6D48
+#define D2MODE_PRIORITY_B_CNT		0x6D4C
+
+/* ix[MC] registers */
+#define MC_FB_LOCATION			0x01
+#define		MC_FB_START_MASK			0x0000FFFF
+#define		MC_FB_START_SHIFT			0
+#define		MC_FB_TOP_MASK				0xFFFF0000
+#define		MC_FB_TOP_SHIFT				16
+#define MC_AGP_LOCATION			0x02
+#define		MC_AGP_START_MASK			0x0000FFFF
+#define		MC_AGP_START_SHIFT			0
+#define		MC_AGP_TOP_MASK				0xFFFF0000
+#define		MC_AGP_TOP_SHIFT			16
+#define MC_AGP_BASE			0x03
+#define MC_AGP_BASE_2			0x04
+#define	MC_CNTL				0x5
+#define		MEM_NUM_CHANNELS_MASK			0x00000003
+#define	MC_STATUS			0x08
+#define		MC_STATUS_IDLE				(1 << 4)
+#define	MC_MISC_LAT_TIMER		0x09
+#define		MC_CPR_INIT_LAT_MASK			0x0000000F
+#define		MC_VF_INIT_LAT_MASK			0x000000F0
+#define		MC_DISP0R_INIT_LAT_MASK			0x00000F00
+#define		MC_DISP0R_INIT_LAT_SHIFT		8
+#define		MC_DISP1R_INIT_LAT_MASK			0x0000F000
+#define		MC_DISP1R_INIT_LAT_SHIFT		12
+#define		MC_FIXED_INIT_LAT_MASK			0x000F0000
+#define		MC_E2R_INIT_LAT_MASK			0x00F00000
+#define		SAME_PAGE_PRIO_MASK			0x0F000000
+#define		MC_GLOBW_INIT_LAT_MASK			0xF0000000
+
+
+/*
+ * PM4 packet
+ */
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
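+/* Example: PACKET0(WAIT_UNTIL, 0) builds a type-0 header that writes a
+ * single dword to WAIT_UNTIL; the count field is the number of dwords
+ * minus one.
+ */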
+
+/* Registers */
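+/* Naming convention for the helpers below: S_<reg>_<field>(x) shifts a
+ * value into the field, G_<reg>_<field>(x) extracts it, and
+ * C_<reg>_<field> is the AND-mask that clears the field.
+ */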
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_000300_VGA_RENDER_CONTROL                  0x000300
+#define   S_000300_VGA_BLINK_RATE(x)                   (((x) & 0x1F) << 0)
+#define   G_000300_VGA_BLINK_RATE(x)                   (((x) >> 0) & 0x1F)
+#define   C_000300_VGA_BLINK_RATE                      0xFFFFFFE0
+#define   S_000300_VGA_BLINK_MODE(x)                   (((x) & 0x3) << 5)
+#define   G_000300_VGA_BLINK_MODE(x)                   (((x) >> 5) & 0x3)
+#define   C_000300_VGA_BLINK_MODE                      0xFFFFFF9F
+#define   S_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) & 0x1) << 7)
+#define   G_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) >> 7) & 0x1)
+#define   C_000300_VGA_CURSOR_BLINK_INVERT             0xFFFFFF7F
+#define   S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) & 0x1) << 8)
+#define   G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) >> 8) & 0x1)
+#define   C_000300_VGA_EXTD_ADDR_COUNT_ENABLE          0xFFFFFEFF
+#define   S_000300_VGA_VSTATUS_CNTL(x)                 (((x) & 0x3) << 16)
+#define   G_000300_VGA_VSTATUS_CNTL(x)                 (((x) >> 16) & 0x3)
+#define   C_000300_VGA_VSTATUS_CNTL                    0xFFFCFFFF
+#define   S_000300_VGA_LOCK_8DOT(x)                    (((x) & 0x1) << 24)
+#define   G_000300_VGA_LOCK_8DOT(x)                    (((x) >> 24) & 0x1)
+#define   C_000300_VGA_LOCK_8DOT                       0xFEFFFFFF
+#define   S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
+#define   G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
+#define   C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL    0xFDFFFFFF
+#define R_000310_VGA_MEMORY_BASE_ADDRESS             0x000310
+#define   S_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000310_VGA_MEMORY_BASE_ADDRESS             0x00000000
+#define R_000328_VGA_HDP_CONTROL                     0x000328
+#define   S_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) & 0x1) << 0)
+#define   G_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) >> 0) & 0x1)
+#define   C_000328_VGA_MEM_PAGE_SELECT_EN              0xFFFFFFFE
+#define   S_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) & 0x1) << 8)
+#define   G_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) >> 8) & 0x1)
+#define   C_000328_VGA_RBBM_LOCK_DISABLE               0xFFFFFEFF
+#define   S_000328_VGA_SOFT_RESET(x)                   (((x) & 0x1) << 16)
+#define   G_000328_VGA_SOFT_RESET(x)                   (((x) >> 16) & 0x1)
+#define   C_000328_VGA_SOFT_RESET                      0xFFFEFFFF
+#define   S_000328_VGA_TEST_RESET_CONTROL(x)           (((x) & 0x1) << 24)
+#define   G_000328_VGA_TEST_RESET_CONTROL(x)           (((x) >> 24) & 0x1)
+#define   C_000328_VGA_TEST_RESET_CONTROL              0xFEFFFFFF
+#define R_000330_D1VGA_CONTROL                       0x000330
+#define   S_000330_D1VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000330_D1VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000330_D1VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000330_D1VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000330_D1VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000330_D1VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000330_D1VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000330_D1VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000330_D1VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000330_D1VGA_ROTATE                        0xFCFFFFFF
+#define R_000338_D2VGA_CONTROL                       0x000338
+#define   S_000338_D2VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000338_D2VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000338_D2VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000338_D2VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000338_D2VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000338_D2VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000338_D2VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000338_D2VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000338_D2VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000338_D2VGA_ROTATE                        0xFCFFFFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006080_D1CRTC_CONTROL                      0x006080
+#define   S_006080_D1CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006080_D1CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006080_D1CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006080_D1CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006080_D1CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006080_D1CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0060E8_D1CRTC_UPDATE_LOCK                  0x0060E8
+#define   S_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0060E8_D1CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x006110
+#define   S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x006118
+#define   S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+#define R_006880_D2CRTC_CONTROL                      0x006880
+#define   S_006880_D2CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006880_D2CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006880_D2CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006880_D2CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006880_D2CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006880_D2CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0068E8_D2CRTC_UPDATE_LOCK                  0x0068E8
+#define   S_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0068E8_D2CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x006910
+#define   S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x006918
+#define   S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+
+
+#define R_000001_MC_FB_LOCATION                      0x000001
+#define   S_000001_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000001_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000001_MC_FB_START                         0xFFFF0000
+#define   S_000001_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000001_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000001_MC_FB_TOP                           0x0000FFFF
+#define R_000002_MC_AGP_LOCATION                     0x000002
+#define   S_000002_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000002_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000002_MC_AGP_START                        0xFFFF0000
+#define   S_000002_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000002_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000002_MC_AGP_TOP                          0x0000FFFF
+#define R_000003_MC_AGP_BASE                         0x000003
+#define   S_000003_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000003_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000003_AGP_BASE_ADDR                       0x00000000
+#define R_000004_MC_AGP_BASE_2                       0x000004
+#define   S_000004_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000004_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000004_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+
+#define R_00000F_CP_DYN_CNTL                         0x00000F
+#define   S_00000F_CP_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_00000F_CP_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_00000F_CP_FORCEON                          0xFFFFFFFE
+#define   S_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_00000F_CP_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_00000F_CP_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_00000F_CP_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_00000F_CP_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_00000F_CP_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_00000F_CP_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_00000F_CP_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_00000F_CP_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_00000F_CP_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_00000F_CP_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_00000F_CP_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_00000F_CP_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_00000F_CP_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_00000F_CP_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_00000F_CP_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_00000F_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_00000F_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_00000F_SPARE                               0xFF3FFFFF
+#define   S_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_00000F_CP_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000011_E2_DYN_CNTL                         0x000011
+#define   S_000011_E2_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_000011_E2_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_000011_E2_FORCEON                          0xFFFFFFFE
+#define   S_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_000011_E2_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_000011_E2_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_000011_E2_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_000011_E2_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_000011_E2_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_000011_E2_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_000011_E2_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_000011_E2_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_000011_E2_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_000011_E2_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_000011_E2_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_000011_E2_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_000011_E2_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_000011_E2_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_000011_E2_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_000011_E2_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_000011_E2_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_000011_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000011_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000011_SPARE                               0xFF3FFFFF
+#define   S_000011_E2_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_000011_E2_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_000011_E2_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000013_IDCT_DYN_CNTL                       0x000013
+#define   S_000013_IDCT_FORCEON(x)                     (((x) & 0x1) << 0)
+#define   G_000013_IDCT_FORCEON(x)                     (((x) >> 0) & 0x1)
+#define   C_000013_IDCT_FORCEON                        0xFFFFFFFE
+#define   S_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 1)
+#define   G_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 1) & 0x1)
+#define   C_000013_IDCT_MAX_DYN_STOP_LAT               0xFFFFFFFD
+#define   S_000013_IDCT_CLOCK_STATUS(x)                (((x) & 0x1) << 2)
+#define   G_000013_IDCT_CLOCK_STATUS(x)                (((x) >> 2) & 0x1)
+#define   C_000013_IDCT_CLOCK_STATUS                   0xFFFFFFFB
+#define   S_000013_IDCT_PROG_SHUTOFF(x)                (((x) & 0x1) << 3)
+#define   G_000013_IDCT_PROG_SHUTOFF(x)                (((x) >> 3) & 0x1)
+#define   C_000013_IDCT_PROG_SHUTOFF                   0xFFFFFFF7
+#define   S_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) & 0xFF) << 4)
+#define   G_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) >> 4) & 0xFF)
+#define   C_000013_IDCT_PROG_DELAY_VALUE               0xFFFFF00F
+#define   S_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) & 0xFF) << 12)
+#define   G_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) >> 12) & 0xFF)
+#define   C_000013_IDCT_LOWER_POWER_IDLE               0xFFF00FFF
+#define   S_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) & 0x1) << 20)
+#define   G_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) >> 20) & 0x1)
+#define   C_000013_IDCT_LOWER_POWER_IGNORE             0xFFEFFFFF
+#define   S_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) & 0x1) << 21)
+#define   G_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) >> 21) & 0x1)
+#define   C_000013_IDCT_NORMAL_POWER_IGNORE            0xFFDFFFFF
+#define   S_000013_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000013_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000013_SPARE                               0xFF3FFFFF
+#define   S_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) & 0xFF) << 24)
+#define   G_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) >> 24) & 0xFF)
+#define   C_000013_IDCT_NORMAL_POWER_BUSY              0x00FFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
new file mode 100644
index 0000000..6986051
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -0,0 +1,2160 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv6xxd.h"
+#include "r600_dpm.h"
+#include "rv6xx_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
+					u32 unscaled_count, u32 unit);
+
+static struct rv6xx_ps *rv6xx_get_ps(struct radeon_ps *rps)
+{
+	struct rv6xx_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static struct rv6xx_power_info *rv6xx_get_pi(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static void rv6xx_force_pcie_gen1(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	tmp &= ~LC_GEN2_EN;	/* clear gen2 enable so the link retrains to gen1 */
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	tmp |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+
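+	/* wait, up to usec_timeout, for the link to drop out of the gen2 data rate */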
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (!(RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE))
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	tmp &= ~LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+}
+
+static void rv6xx_enable_pcie_gen2_support(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+		tmp |= LC_GEN2_EN;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+	}
+}
+
+static void rv6xx_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
+					       bool enable)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+	if (enable)
+		tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
+	else
+		tmp |= LC_HW_VOLTAGE_IF_CONTROL(0);
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+}
+
+static void rv6xx_enable_l0s(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
+	tmp |= LC_L0S_INACTIVITY(3);
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+}
+
+static void rv6xx_enable_l1(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	tmp &= ~LC_L1_INACTIVITY_MASK;
+	tmp |= LC_L1_INACTIVITY(4);
+	tmp &= ~LC_PMI_TO_L1_DIS;
+	tmp &= ~LC_ASPM_TO_L1_DIS;
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+}
+
+static void rv6xx_enable_pll_sleep_in_l1(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
+	tmp |= LC_L1_INACTIVITY(8);
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+
+	/* NOTE, this is a PCIE indirect reg, not PCIE PORT */
+	tmp = RREG32_PCIE(PCIE_P_CNTL);
+	tmp |= P_PLL_PWRDN_IN_L1L23;
+	tmp &= ~P_PLL_BUF_PDNB;
+	tmp &= ~P_PLL_PDNB;
+	tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
+	WREG32_PCIE(PCIE_P_CNTL, tmp);
+}
+
+static int rv6xx_convert_clock_to_stepping(struct radeon_device *rdev,
+					   u32 clock, struct rv6xx_sclk_stepping *step)
+{
+	int ret;
+	struct atom_clock_dividers dividers;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     clock, false, &dividers);
+	if (ret)
+		return ret;
+
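+	/* post_div packs two pulse counts in its nibbles; the divider is 2 + lo + hi */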
+	if (dividers.enable_post_div)
+		step->post_divider = 2 + (dividers.post_div & 0xF) + (dividers.post_div >> 4);
+	else
+		step->post_divider = 1;
+
+	step->vco_frequency = clock * step->post_divider;
+
+	return 0;
+}
+
+static void rv6xx_output_stepping(struct radeon_device *rdev,
+				  u32 step_index, struct rv6xx_sclk_stepping *step)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	u32 ref_clk = rdev->clock.spll.reference_freq;
+	u32 fb_divider;
+	u32 spll_step_count = rv6xx_scale_count_given_unit(rdev,
+							   R600_SPLLSTEPTIME_DFLT *
+							   pi->spll_ref_div,
+							   R600_SPLLSTEPUNIT_DFLT);
+
+	r600_engine_clock_entry_enable(rdev, step_index, true);
+	r600_engine_clock_entry_enable_pulse_skipping(rdev, step_index, false);
+
+	if (step->post_divider == 1)
+		r600_engine_clock_entry_enable_post_divider(rdev, step_index, false);
+	else {
+		u32 lo_len = (step->post_divider - 2) / 2;
+		u32 hi_len = step->post_divider - 2 - lo_len;
+
+		r600_engine_clock_entry_enable_post_divider(rdev, step_index, true);
+		r600_engine_clock_entry_set_post_divider(rdev, step_index, (hi_len << 4) | lo_len);
+	}
+
+	fb_divider = ((step->vco_frequency * pi->spll_ref_div) / ref_clk) >>
+		pi->fb_div_scale;
+
+	r600_engine_clock_entry_set_reference_divider(rdev, step_index,
+						      pi->spll_ref_div - 1);
+	r600_engine_clock_entry_set_feedback_divider(rdev, step_index, fb_divider);
+	r600_engine_clock_entry_set_step_time(rdev, step_index, spll_step_count);
+}
+
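+/*
+ * Step the VCO frequency by step_size percent.  The decreasing case divides
+ * rounding up so repeated steps converge on the target from above without
+ * stepping past it.
+ */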
+static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev,
+						      struct rv6xx_sclk_stepping *cur,
+						      bool increasing_vco, u32 step_size)
+{
+	struct rv6xx_sclk_stepping next;
+
+	next.post_divider = cur->post_divider;
+
+	if (increasing_vco)
+		next.vco_frequency = (cur->vco_frequency * (100 + step_size)) / 100;
+	else
+		next.vco_frequency = (cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size);
+
+	return next;
+}
+
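+/*
+ * The post divider may only be stepped down while the resulting output clock,
+ * vco / (post_div - 1), stays at or below the target output clock.
+ */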
+static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
+				    struct rv6xx_sclk_stepping *cur,
+				    struct rv6xx_sclk_stepping *target)
+{
+	return (cur->post_divider > target->post_divider) &&
+		((cur->vco_frequency * target->post_divider) <=
+		 (target->vco_frequency * (cur->post_divider - 1)));
+}
+
+static struct rv6xx_sclk_stepping rv6xx_next_post_div_step(struct radeon_device *rdev,
+							   struct rv6xx_sclk_stepping *cur,
+							   struct rv6xx_sclk_stepping *target)
+{
+	struct rv6xx_sclk_stepping next = *cur;
+
+	while (rv6xx_can_step_post_div(rdev, &next, target))
+		next.post_divider--;
+
+	return next;
+}
+
+static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
+					  struct rv6xx_sclk_stepping *cur,
+					  struct rv6xx_sclk_stepping *target,
+					  bool increasing_vco)
+{
+	return (increasing_vco && (cur->vco_frequency >= target->vco_frequency)) ||
+		(!increasing_vco && (cur->vco_frequency <= target->vco_frequency));
+}
+
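+/*
+ * Emit the sclk stepping entries that walk from the 'low' clock to the
+ * 'high' clock: post-divider steps are taken where possible, the VCO is
+ * otherwise stepped by R600_VCOSTEPPCT_DFLT percent, and a small ending
+ * step precedes the final target entry.
+ */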
+static void rv6xx_generate_steps(struct radeon_device *rdev,
+				 u32 low, u32 high,
+				 u32 start_index, u8 *end_index)
+{
+	struct rv6xx_sclk_stepping cur;
+	struct rv6xx_sclk_stepping target;
+	bool increasing_vco;
+	u32 step_index = start_index;
+
+	rv6xx_convert_clock_to_stepping(rdev, low, &cur);
+	rv6xx_convert_clock_to_stepping(rdev, high, &target);
+
+	rv6xx_output_stepping(rdev, step_index++, &cur);
+
+	increasing_vco = (target.vco_frequency >= cur.vco_frequency);
+
+	if (target.post_divider > cur.post_divider)
+		cur.post_divider = target.post_divider;
+
+	while (1) {
+		struct rv6xx_sclk_stepping next;
+
+		if (rv6xx_can_step_post_div(rdev, &cur, &target))
+			next = rv6xx_next_post_div_step(rdev, &cur, &target);
+		else
+			next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT);
+
+		if (rv6xx_reached_stepping_target(rdev, &next, &target, increasing_vco)) {
+			struct rv6xx_sclk_stepping tiny =
+				rv6xx_next_vco_step(rdev, &target, !increasing_vco, R600_ENDINGVCOSTEPPCT_DFLT);
+			tiny.post_divider = next.post_divider;
+
+			if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco))
+				rv6xx_output_stepping(rdev, step_index++, &tiny);
+
+			if ((next.post_divider != target.post_divider) &&
+			    (next.vco_frequency != target.vco_frequency)) {
+				struct rv6xx_sclk_stepping final_vco;
+
+				final_vco.vco_frequency = target.vco_frequency;
+				final_vco.post_divider = next.post_divider;
+
+				rv6xx_output_stepping(rdev, step_index++, &final_vco);
+			}
+
+			rv6xx_output_stepping(rdev, step_index++, &target);
+			break;
+		} else
+			rv6xx_output_stepping(rdev, step_index++, &next);
+
+		cur = next;
+	}
+
+	*end_index = (u8)step_index - 1;
+}
+
+static void rv6xx_generate_single_step(struct radeon_device *rdev,
+				       u32 clock, u32 index)
+{
+	struct rv6xx_sclk_stepping step;
+
+	rv6xx_convert_clock_to_stepping(rdev, clock, &step);
+	rv6xx_output_stepping(rdev, index, &step);
+}
+
+static void rv6xx_invalidate_intermediate_steps_range(struct radeon_device *rdev,
+						      u32 start_index, u32 end_index)
+{
+	u32 step_index;
+
+	for (step_index = start_index + 1; step_index < end_index; step_index++)
+		r600_engine_clock_entry_enable(rdev, step_index, false);
+}
+
+static void rv6xx_set_engine_spread_spectrum_clk_s(struct radeon_device *rdev,
+						   u32 index, u32 clk_s)
+{
+	WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
+		 CLKS(clk_s), ~CLKS_MASK);
+}
+
+static void rv6xx_set_engine_spread_spectrum_clk_v(struct radeon_device *rdev,
+						   u32 index, u32 clk_v)
+{
+	WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
+		 CLKV(clk_v), ~CLKV_MASK);
+}
+
+static void rv6xx_enable_engine_spread_spectrum(struct radeon_device *rdev,
+						u32 index, bool enable)
+{
+	if (enable)
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
+			 SSEN, ~SSEN);
+	else
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
+			 0, ~SSEN);
+}
+
+static void rv6xx_set_memory_spread_spectrum_clk_s(struct radeon_device *rdev,
+						   u32 clk_s)
+{
+	WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKS(clk_s), ~CLKS_MASK);
+}
+
+static void rv6xx_set_memory_spread_spectrum_clk_v(struct radeon_device *rdev,
+						   u32 clk_v)
+{
+	WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKV(clk_v), ~CLKV_MASK);
+}
+
+static void rv6xx_enable_memory_spread_spectrum(struct radeon_device *rdev,
+						bool enable)
+{
+	if (enable)
+		WREG32_P(CG_MPLL_SPREAD_SPECTRUM, SSEN, ~SSEN);
+	else
+		WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+}
+
+static void rv6xx_enable_dynamic_spread_spectrum(struct radeon_device *rdev,
+						 bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+}
+
+static void rv6xx_memory_clock_entry_enable_post_divider(struct radeon_device *rdev,
+							 u32 index, bool enable)
+{
+	if (enable)
+		WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
+			 LEVEL0_MPLL_DIV_EN, ~LEVEL0_MPLL_DIV_EN);
+	else
+		WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), 0, ~LEVEL0_MPLL_DIV_EN);
+}
+
+static void rv6xx_memory_clock_entry_set_post_divider(struct radeon_device *rdev,
+						      u32 index, u32 divider)
+{
+	WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
+		 LEVEL0_MPLL_POST_DIV(divider), ~LEVEL0_MPLL_POST_DIV_MASK);
+}
+
+static void rv6xx_memory_clock_entry_set_feedback_divider(struct radeon_device *rdev,
+							  u32 index, u32 divider)
+{
+	WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), LEVEL0_MPLL_FB_DIV(divider),
+		 ~LEVEL0_MPLL_FB_DIV_MASK);
+}
+
+static void rv6xx_memory_clock_entry_set_reference_divider(struct radeon_device *rdev,
+							   u32 index, u32 divider)
+{
+	WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
+		 LEVEL0_MPLL_REF_DIV(divider), ~LEVEL0_MPLL_REF_DIV_MASK);
+}
+
+static void rv6xx_vid_response_set_brt(struct radeon_device *rdev, u32 rt)
+{
+	WREG32_P(VID_RT, BRT(rt), ~BRT_MASK);
+}
+
+static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device *rdev)
+{
+	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
+}
+
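+/* one count unit covers 4^unit source clocks */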
+static u32 rv6xx_clocks_per_unit(u32 unit)
+{
+	u32 tmp = 1 << (2 * unit);
+
+	return tmp;
+}
+
+static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
+					u32 unscaled_count, u32 unit)
+{
+	u32 count_per_unit = rv6xx_clocks_per_unit(unit);
+
+	return (unscaled_count + count_per_unit - 1) / count_per_unit;
+}
+
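+/* reference_freq is in 10 kHz units, so ref_clk / 100 is clocks per microsecond */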
+static u32 rv6xx_compute_count_for_delay(struct radeon_device *rdev,
+					 u32 delay_us, u32 unit)
+{
+	u32 ref_clk = rdev->clock.spll.reference_freq;
+
+	return rv6xx_scale_count_given_unit(rdev, delay_us * (ref_clk / 100), unit);
+}
+
+static void rv6xx_calculate_engine_speed_stepping_parameters(struct radeon_device *rdev,
+							     struct rv6xx_ps *state)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.sclks[R600_POWER_LEVEL_LOW] =
+		state->low.sclk;
+	pi->hw.sclks[R600_POWER_LEVEL_MEDIUM] =
+		state->medium.sclk;
+	pi->hw.sclks[R600_POWER_LEVEL_HIGH] =
+		state->high.sclk;
+
+	pi->hw.low_sclk_index = R600_POWER_LEVEL_LOW;
+	pi->hw.medium_sclk_index = R600_POWER_LEVEL_MEDIUM;
+	pi->hw.high_sclk_index = R600_POWER_LEVEL_HIGH;
+}
+
+static void rv6xx_calculate_memory_clock_stepping_parameters(struct radeon_device *rdev,
+							     struct rv6xx_ps *state)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.mclks[R600_POWER_LEVEL_CTXSW] =
+		state->high.mclk;
+	pi->hw.mclks[R600_POWER_LEVEL_HIGH] =
+		state->high.mclk;
+	pi->hw.mclks[R600_POWER_LEVEL_MEDIUM] =
+		state->medium.mclk;
+	pi->hw.mclks[R600_POWER_LEVEL_LOW] =
+		state->low.mclk;
+
+	pi->hw.high_mclk_index = R600_POWER_LEVEL_HIGH;
+
+	if (state->high.mclk == state->medium.mclk)
+		pi->hw.medium_mclk_index =
+			pi->hw.high_mclk_index;
+	else
+		pi->hw.medium_mclk_index = R600_POWER_LEVEL_MEDIUM;
+
+	if (state->medium.mclk == state->low.mclk)
+		pi->hw.low_mclk_index =
+			pi->hw.medium_mclk_index;
+	else
+		pi->hw.low_mclk_index = R600_POWER_LEVEL_LOW;
+}
+
+static void rv6xx_calculate_voltage_stepping_parameters(struct radeon_device *rdev,
+							struct rv6xx_ps *state)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.vddc[R600_POWER_LEVEL_CTXSW] = state->high.vddc;
+	pi->hw.vddc[R600_POWER_LEVEL_HIGH] = state->high.vddc;
+	pi->hw.vddc[R600_POWER_LEVEL_MEDIUM] = state->medium.vddc;
+	pi->hw.vddc[R600_POWER_LEVEL_LOW] = state->low.vddc;
+
+	pi->hw.backbias[R600_POWER_LEVEL_CTXSW] =
+		(state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
+	pi->hw.backbias[R600_POWER_LEVEL_HIGH] =
+		(state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
+	pi->hw.backbias[R600_POWER_LEVEL_MEDIUM] =
+		(state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
+	pi->hw.backbias[R600_POWER_LEVEL_LOW] =
+		(state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
+
+	pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH] =
+		(state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
+	pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM] =
+		(state->medium.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
+	pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW] =
+		(state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
+
+	pi->hw.high_vddc_index = R600_POWER_LEVEL_HIGH;
+
+	if ((state->high.vddc == state->medium.vddc) &&
+	    ((state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
+	     (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
+		pi->hw.medium_vddc_index =
+			pi->hw.high_vddc_index;
+	else
+		pi->hw.medium_vddc_index = R600_POWER_LEVEL_MEDIUM;
+
+	if ((state->medium.vddc == state->low.vddc) &&
+	    ((state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
+	     (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
+		pi->hw.low_vddc_index =
+			pi->hw.medium_vddc_index;
+	else
+		pi->hw.low_vddc_index = R600_POWER_LEVEL_LOW;
+}
+
+static inline u32 rv6xx_calculate_vco_frequency(u32 ref_clock,
+						struct atom_clock_dividers *dividers,
+						u32 fb_divider_scale)
+{
+	return ref_clock * ((dividers->fb_div & ~1) << fb_divider_scale) /
+		(dividers->ref_div + 1);
+}
+
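+/*
+ * Spread spectrum setup: CLK_V encodes the modulation depth and CLK_S the
+ * modulation period, derived from the ss rate and percentage reported by
+ * the vbios and from the PLL reference and VCO frequencies.
+ */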
+static inline u32 rv6xx_calculate_spread_spectrum_clk_v(u32 vco_freq, u32 ref_freq,
+							u32 ss_rate, u32 ss_percent,
+							u32 fb_divider_scale)
+{
+	u32 fb_divider = vco_freq / ref_freq;
+
+	return (ss_percent * ss_rate * 4 * (fb_divider * fb_divider) /
+		(5375 * ((vco_freq * 10) / (4096 >> fb_divider_scale))));
+}
+
+static inline u32 rv6xx_calculate_spread_spectrum_clk_s(u32 ss_rate, u32 ref_freq)
+{
+	return (((ref_freq * 10) / (ss_rate * 2)) - 1) / 4;
+}
+
+static void rv6xx_program_engine_spread_spectrum(struct radeon_device *rdev,
+						 u32 clock, enum r600_power_level level)
+{
+	u32 ref_clk = rdev->clock.spll.reference_freq;
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	struct radeon_atom_ss ss;
+	u32 vco_freq, clk_v, clk_s;
+
+	rv6xx_enable_engine_spread_spectrum(rdev, level, false);
+
+	if (clock && pi->sclk_ss) {
+		if (radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, clock, false, &dividers) == 0) {
+			vco_freq = rv6xx_calculate_vco_frequency(ref_clk, &dividers,
+								 pi->fb_div_scale);
+
+			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+							     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
+									      (ref_clk / (dividers.ref_div + 1)),
+									      ss.rate,
+									      ss.percentage,
+									      pi->fb_div_scale);
+
+				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
+									      (ref_clk / (dividers.ref_div + 1)));
+
+				rv6xx_set_engine_spread_spectrum_clk_v(rdev, level, clk_v);
+				rv6xx_set_engine_spread_spectrum_clk_s(rdev, level, clk_s);
+				rv6xx_enable_engine_spread_spectrum(rdev, level, true);
+			}
+		}
+	}
+}
+
+static void rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_engine_spread_spectrum(rdev,
+					     pi->hw.sclks[R600_POWER_LEVEL_HIGH],
+					     R600_POWER_LEVEL_HIGH);
+
+	rv6xx_program_engine_spread_spectrum(rdev,
+					     pi->hw.sclks[R600_POWER_LEVEL_MEDIUM],
+					     R600_POWER_LEVEL_MEDIUM);
+
+
+static int rv6xx_program_mclk_stepping_entry(struct radeon_device *rdev,
+					     u32 entry, u32 clock)
+{
+	struct atom_clock_dividers dividers;
+
+	if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, clock, false, &dividers))
+		return -EINVAL;
+
+	rv6xx_memory_clock_entry_set_reference_divider(rdev, entry, dividers.ref_div);
+	rv6xx_memory_clock_entry_set_feedback_divider(rdev, entry, dividers.fb_div);
+	rv6xx_memory_clock_entry_set_post_divider(rdev, entry, dividers.post_div);
+
+	if (dividers.enable_post_div)
+		rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, true);
+	else
+		rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, false);
+
+	return 0;
+}
+
+static void rv6xx_program_mclk_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	int i;
+
+	for (i = 1; i < R600_PM_NUMBER_OF_MCLKS; i++) {
+		if (pi->hw.mclks[i])
+			rv6xx_program_mclk_stepping_entry(rdev, i,
+							  pi->hw.mclks[i]);
+	}
+}
+
+static void rv6xx_find_memory_clock_with_highest_vco(struct radeon_device *rdev,
+						     u32 requested_memory_clock,
+						     u32 ref_clk,
+						     struct atom_clock_dividers *dividers,
+						     u32 *vco_freq)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	struct atom_clock_dividers req_dividers;
+	u32 vco_freq_temp;
+
+	if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					   requested_memory_clock, false, &req_dividers) == 0) {
+		vco_freq_temp = rv6xx_calculate_vco_frequency(ref_clk, &req_dividers,
+							      pi->fb_div_scale);
+
+		if (vco_freq_temp > *vco_freq) {
+			*dividers = req_dividers;
+			*vco_freq = vco_freq_temp;
+		}
+	}
+}
+
+static void rv6xx_program_mclk_spread_spectrum_parameters(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	u32 ref_clk = rdev->clock.mpll.reference_freq;
+	struct atom_clock_dividers dividers;
+	struct radeon_atom_ss ss;
+	u32 vco_freq = 0, clk_v, clk_s;
+
+	rv6xx_enable_memory_spread_spectrum(rdev, false);
+
+	if (pi->mclk_ss) {
+		rv6xx_find_memory_clock_with_highest_vco(rdev,
+							 pi->hw.mclks[pi->hw.high_mclk_index],
+							 ref_clk,
+							 &dividers,
+							 &vco_freq);
+
+		rv6xx_find_memory_clock_with_highest_vco(rdev,
+							 pi->hw.mclks[pi->hw.medium_mclk_index],
+							 ref_clk,
+							 &dividers,
+							 &vco_freq);
+
+		rv6xx_find_memory_clock_with_highest_vco(rdev,
+							 pi->hw.mclks[pi->hw.low_mclk_index],
+							 ref_clk,
+							 &dividers,
+							 &vco_freq);
+
+		if (vco_freq) {
+			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+							     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
+									     (ref_clk / (dividers.ref_div + 1)),
+									     ss.rate,
+									     ss.percentage,
+									     pi->fb_div_scale);
+
+				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
+									     (ref_clk / (dividers.ref_div + 1)));
+
+				rv6xx_set_memory_spread_spectrum_clk_v(rdev, clk_v);
+				rv6xx_set_memory_spread_spectrum_clk_s(rdev, clk_s);
+				rv6xx_enable_memory_spread_spectrum(rdev, true);
+			}
+		}
+	}
+}
+
+static int rv6xx_program_voltage_stepping_entry(struct radeon_device *rdev,
+						u32 entry, u16 voltage)
+{
+	u32 mask, set_pins;
+	int ret;
+
+	ret = radeon_atom_get_voltage_gpio_settings(rdev, voltage,
+						    SET_VOLTAGE_TYPE_ASIC_VDDC,
+						    &set_pins, &mask);
+	if (ret)
+		return ret;
+
+	r600_voltage_control_program_voltages(rdev, entry, set_pins);
+
+	return 0;
+}
+
+static void rv6xx_program_voltage_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	int i;
+
+	for (i = 1; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++)
+		rv6xx_program_voltage_stepping_entry(rdev, i,
+						     pi->hw.vddc[i]);
+}
+
+static void rv6xx_program_backbias_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (pi->hw.backbias[1])
+		WREG32_P(VID_UPPER_GPIO_CNTL, MEDIUM_BACKBIAS_VALUE, ~MEDIUM_BACKBIAS_VALUE);
+	else
+		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~MEDIUM_BACKBIAS_VALUE);
+
+	if (pi->hw.backbias[2])
+		WREG32_P(VID_UPPER_GPIO_CNTL, HIGH_BACKBIAS_VALUE, ~HIGH_BACKBIAS_VALUE);
+	else
+		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~HIGH_BACKBIAS_VALUE);
+}
+
+static void rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_engine_spread_spectrum(rdev,
+					     pi->hw.sclks[R600_POWER_LEVEL_LOW],
+					     R600_POWER_LEVEL_LOW);
+}
+
+static void rv6xx_program_mclk_stepping_parameters_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (pi->hw.mclks[0])
+		rv6xx_program_mclk_stepping_entry(rdev, 0,
+						  pi->hw.mclks[0]);
+}
+
+static void rv6xx_program_voltage_stepping_parameters_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_voltage_stepping_entry(rdev, 0,
+					     pi->hw.vddc[0]);
+}
+
+static void rv6xx_program_backbias_stepping_parameters_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (pi->hw.backbias[0])
+		WREG32_P(VID_UPPER_GPIO_CNTL, LOW_BACKBIAS_VALUE, ~LOW_BACKBIAS_VALUE);
+	else
+		WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~LOW_BACKBIAS_VALUE);
+}
+
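+/*
+ * Derive the arbiter refresh rate from the DRAM row count in RAMCFG and
+ * the refresh interval field in MC_SEQ_RESERVE_M.
+ */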
+static u32 calculate_memory_refresh_rate(struct radeon_device *rdev,
+					 u32 engine_clock)
+{
+	u32 dram_rows, dram_refresh_rate;
+	u32 tmp;
+
+	tmp = (RREG32(RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+	dram_rows = 1 << (tmp + 10);
+	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_RESERVE_M) & 0x3) + 3);
+
+	return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+}
+
+static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	u32 sqm_ratio;
+	u32 arb_refresh_rate;
+	u32 high_clock;
+
+	if (pi->hw.sclks[R600_POWER_LEVEL_HIGH] <
+	    (pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40))
+		high_clock = pi->hw.sclks[R600_POWER_LEVEL_HIGH];
+	else
+		high_clock =
+			pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40;
+
+	radeon_atom_set_engine_dram_timings(rdev, high_clock, 0);
+
+	sqm_ratio = (STATE0(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_LOW]) |
+		     STATE1(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_MEDIUM]) |
+		     STATE2(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]) |
+		     STATE3(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]));
+	WREG32(SQM_RATIO, sqm_ratio);
+
+	arb_refresh_rate =
+		(POWERMODE0(calculate_memory_refresh_rate(rdev,
+							  pi->hw.sclks[R600_POWER_LEVEL_LOW])) |
+		 POWERMODE1(calculate_memory_refresh_rate(rdev,
+							  pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+		 POWERMODE2(calculate_memory_refresh_rate(rdev,
+							  pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
+		 POWERMODE3(calculate_memory_refresh_rate(rdev,
+							  pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
+	WREG32(ARB_RFSH_RATE, arb_refresh_rate);
+}
+
+static void rv6xx_program_mpll_timing_parameters(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_set_mpll_lock_time(rdev, R600_MPLLLOCKTIME_DFLT *
+				pi->mpll_ref_div);
+	r600_set_mpll_reset_time(rdev, R600_MPLLRESETTIME_DFLT);
+}
+
+static void rv6xx_program_bsp(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	u32 ref_clk = rdev->clock.spll.reference_freq;
+
+	r600_calculate_u_and_p(R600_ASI_DFLT,
+			       ref_clk, 16,
+			       &pi->bsp,
+			       &pi->bsu);
+
+	r600_set_bsp(rdev, pi->bsu, pi->bsp);
+}
+
+static void rv6xx_program_at(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_set_at(rdev,
+		    (pi->hw.rp[0] * pi->bsp) / 200,
+		    (pi->hw.rp[1] * pi->bsp) / 200,
+		    (pi->hw.lp[2] * pi->bsp) / 200,
+		    (pi->hw.lp[1] * pi->bsp) / 200);
+}
+
+static void rv6xx_program_git(struct radeon_device *rdev)
+{
+	r600_set_git(rdev, R600_GICST_DFLT);
+}
+
+static void rv6xx_program_tp(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		r600_set_tc(rdev, i, r600_utc[i], r600_dtc[i]);
+
+	r600_select_td(rdev, R600_TD_DFLT);
+}
+
+static void rv6xx_program_vc(struct radeon_device *rdev)
+{
+	r600_set_vrc(rdev, R600_VRC_DFLT);
+}
+
+static void rv6xx_clear_vc(struct radeon_device *rdev)
+{
+	r600_set_vrc(rdev, 0);
+}
+
+static void rv6xx_program_tpp(struct radeon_device *rdev)
+{
+	r600_set_tpu(rdev, R600_TPU_DFLT);
+	r600_set_tpc(rdev, R600_TPC_DFLT);
+}
+
+static void rv6xx_program_sstp(struct radeon_device *rdev)
+{
+	r600_set_sstu(rdev, R600_SSTU_DFLT);
+	r600_set_sst(rdev, R600_SST_DFLT);
+}
+
+static void rv6xx_program_fcp(struct radeon_device *rdev)
+{
+	r600_set_fctu(rdev, R600_FCTU_DFLT);
+	r600_set_fct(rdev, R600_FCT_DFLT);
+}
+
+static void rv6xx_program_vddc3d_parameters(struct radeon_device *rdev)
+{
+	r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
+	r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
+	r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
+	r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
+	r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
+}
+
+static void rv6xx_program_voltage_timing_parameters(struct radeon_device *rdev)
+{
+	u32 rt;
+
+	r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
+
+	r600_vid_rt_set_vrt(rdev,
+			    rv6xx_compute_count_for_delay(rdev,
+							  rdev->pm.dpm.voltage_response_time,
+							  R600_VRU_DFLT));
+
+	rt = rv6xx_compute_count_for_delay(rdev,
+					   rdev->pm.dpm.backbias_response_time,
+					   R600_VRU_DFLT);
+
+	rv6xx_vid_response_set_brt(rdev, (rt + 0x1F) >> 5);
+}
+
+static void rv6xx_program_engine_speed_parameters(struct radeon_device *rdev)
+{
+	r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
+	rv6xx_enable_engine_feedback_and_reference_sync(rdev);
+}
+
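+/* OR together the GPIO masks of every VDDC level so all voltage pins are enabled */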
+static u64 rv6xx_get_master_voltage_mask(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	u64 master_mask = 0;
+	int i;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++) {
+		u32 tmp_mask, tmp_set_pins;
+		int ret;
+
+		ret = radeon_atom_get_voltage_gpio_settings(rdev,
+							    pi->hw.vddc[i],
+							    SET_VOLTAGE_TYPE_ASIC_VDDC,
+							    &tmp_set_pins, &tmp_mask);
+
+		if (ret == 0)
+			master_mask |= tmp_mask;
+	}
+
+	return master_mask;
+}
+
+static void rv6xx_program_voltage_gpio_pins(struct radeon_device *rdev)
+{
+	r600_voltage_control_enable_pins(rdev,
+					 rv6xx_get_master_voltage_mask(rdev));
+}
+
+static void rv6xx_enable_static_voltage_control(struct radeon_device *rdev,
+						struct radeon_ps *new_ps,
+						bool enable)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+
+	if (enable)
+		radeon_atom_set_voltage(rdev,
+					new_state->low.vddc,
+					SET_VOLTAGE_TYPE_ASIC_VDDC);
+	else
+		r600_voltage_control_deactivate_static_control(rdev,
+							       rv6xx_get_master_voltage_mask(rdev));
+}
+
+static void rv6xx_enable_display_gap(struct radeon_device *rdev, bool enable)
+{
+	if (enable) {
+		u32 tmp = (DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
+			   DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
+			   DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+			   DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+			   VBI_TIMER_COUNT(0x3FFF) |
+			   VBI_TIMER_UNIT(7));
+		WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+		WREG32_P(MCLK_PWRMGT_CNTL, USE_DISPLAY_GAP, ~USE_DISPLAY_GAP);
+	} else
+		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~USE_DISPLAY_GAP);
+}
+
+static void rv6xx_program_power_level_enter_state(struct radeon_device *rdev)
+{
+	r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_MEDIUM);
+}
+
+static void rv6xx_calculate_t(u32 l_f, u32 h_f, int h,
+			      int d_l, int d_r, u8 *l, u8 *r)
+{
+	int a_n, a_d, h_r, l_r;
+
+	h_r = d_l;
+	l_r = 100 - d_r;
+
+	a_n = (int)h_f * d_l + (int)l_f * (h - d_r);
+	a_d = (int)l_f * l_r + (int)h_f * h_r;
+
+	if (a_d != 0) {
+		*l = d_l - h_r * a_n / a_d;
+		*r = d_r + l_r * a_n / a_d;
+	}
+}
+
+static void rv6xx_calculate_ap(struct radeon_device *rdev,
+			       struct rv6xx_ps *state)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.lp[0] = 0;
+	pi->hw.rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS - 1]
+		= 100;
+
+	rv6xx_calculate_t(state->low.sclk,
+			  state->medium.sclk,
+			  R600_AH_DFLT,
+			  R600_LMP_DFLT,
+			  R600_RLP_DFLT,
+			  &pi->hw.lp[1],
+			  &pi->hw.rp[0]);
+
+	rv6xx_calculate_t(state->medium.sclk,
+			  state->high.sclk,
+			  R600_AH_DFLT,
+			  R600_LHP_DFLT,
+			  R600_RMP_DFLT,
+			  &pi->hw.lp[2],
+			  &pi->hw.rp[1]);
+}
+
+static void rv6xx_calculate_stepping_parameters(struct radeon_device *rdev,
+						struct radeon_ps *new_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+
+	rv6xx_calculate_engine_speed_stepping_parameters(rdev, new_state);
+	rv6xx_calculate_memory_clock_stepping_parameters(rdev, new_state);
+	rv6xx_calculate_voltage_stepping_parameters(rdev, new_state);
+	rv6xx_calculate_ap(rdev, new_state);
+}
+
+static void rv6xx_program_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_mclk_stepping_parameters_except_lowest_entry(rdev);
+	if (pi->voltage_control)
+		rv6xx_program_voltage_stepping_parameters_except_lowest_entry(rdev);
+	rv6xx_program_backbias_stepping_parameters_except_lowest_entry(rdev);
+	rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(rdev);
+	rv6xx_program_mclk_spread_spectrum_parameters(rdev);
+	rv6xx_program_memory_timing_parameters(rdev);
+}
+
+static void rv6xx_program_stepping_parameters_lowest_entry(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_mclk_stepping_parameters_lowest_entry(rdev);
+	if (pi->voltage_control)
+		rv6xx_program_voltage_stepping_parameters_lowest_entry(rdev);
+	rv6xx_program_backbias_stepping_parameters_lowest_entry(rdev);
+	rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(rdev);
+}
+
+static void rv6xx_program_power_level_low(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW,
+					   pi->hw.low_vddc_index);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW,
+					     pi->hw.low_mclk_index);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW,
+					     pi->hw.low_sclk_index);
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
+					  R600_DISPLAY_WATERMARK_LOW);
+	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
+				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
+}
+
+static void rv6xx_program_power_level_low_to_lowest_state(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
+
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
+					  R600_DISPLAY_WATERMARK_LOW);
+
+	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
+				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
+}
+
+static void rv6xx_program_power_level_medium(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM,
+					  pi->hw.medium_vddc_index);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
+					    pi->hw.medium_mclk_index);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
+					    pi->hw.medium_sclk_index);
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
+					 R600_DISPLAY_WATERMARK_LOW);
+	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
+				      pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM]);
+}
+
+static void rv6xx_program_power_level_medium_for_transition(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_program_mclk_stepping_entry(rdev,
+					  R600_POWER_LEVEL_CTXSW,
+					  pi->hw.mclks[pi->hw.low_mclk_index]);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 1);
+
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
+					     R600_POWER_LEVEL_CTXSW);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
+					     pi->hw.medium_sclk_index);
+
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
+					  R600_DISPLAY_WATERMARK_LOW);
+
+	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
+
+	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
+				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
+}
+
+static void rv6xx_program_power_level_high(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH,
+					   pi->hw.high_vddc_index);
+	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH,
+					     pi->hw.high_mclk_index);
+	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH,
+					     pi->hw.high_sclk_index);
+
+	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH,
+					  R600_DISPLAY_WATERMARK_HIGH);
+
+	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_HIGH,
+				       pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH]);
+}
+
+static void rv6xx_enable_backbias(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL,
+			 ~(BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
+	else
+		WREG32_P(GENERAL_PWRMGT, 0,
+			 ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
+}
+
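+/*
+ * Align memory clock changes with the vblank of whichever crtc is
+ * active so an mclk switch never lands mid-scanout; with no active
+ * crtcs both gap sources are set to ignore and mclk may change at
+ * any time.
+ */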
+static void rv6xx_program_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	if (rdev->pm.dpm.new_active_crtcs & 1) {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+	} else if (rdev->pm.dpm.new_active_crtcs & 2) {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
+	} else {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+	}
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
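+/*
+ * Park the GPIO-controlled voltage on the CTXSW entry at the higher of
+ * the old and new low-state VDDC, so neither state is ever undervolted
+ * while the transition is in flight.
+ */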
+static void rv6xx_set_sw_voltage_to_safe(struct radeon_device *rdev,
+					 struct radeon_ps *new_ps,
+					 struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+	u16 safe_voltage;
+
+	safe_voltage = (new_state->low.vddc >= old_state->low.vddc) ?
+		new_state->low.vddc : old_state->low.vddc;
+
+	rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
+					     safe_voltage);
+
+	WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
+		 ~SW_GPIO_INDEX_MASK);
+}
+
+static void rv6xx_set_sw_voltage_to_low(struct radeon_device *rdev,
+					struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+
+	rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
+					     old_state->low.vddc);
+
+	WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
+		~SW_GPIO_INDEX_MASK);
+}
+
+static void rv6xx_set_safe_backbias(struct radeon_device *rdev,
+				    struct radeon_ps *new_ps,
+				    struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+
+	if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) &&
+	    (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE))
+		WREG32_P(GENERAL_PWRMGT, BACKBIAS_VALUE, ~BACKBIAS_VALUE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_VALUE);
+}
+
+static void rv6xx_set_safe_pcie_gen2(struct radeon_device *rdev,
+				     struct radeon_ps *new_ps,
+				     struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+
+	if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) !=
+	    (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
+		rv6xx_force_pcie_gen1(rdev);
+}
+
+static void rv6xx_enable_dynamic_voltage_control(struct radeon_device *rdev,
+						 bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static void rv6xx_enable_dynamic_backbias_control(struct radeon_device *rdev,
+						  bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, BACKBIAS_DPM_CNTL, ~BACKBIAS_DPM_CNTL);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_DPM_CNTL);
+}
+
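+/*
+ * Walk VDDC from initial_voltage to target_voltage one regulator step
+ * at a time, waiting out the voltage response time after each step.
+ * Both endpoints are rounded to voltages the regulator can actually
+ * produce first, so the loop is guaranteed to terminate.
+ */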
+static int rv6xx_step_sw_voltage(struct radeon_device *rdev,
+				 u16 initial_voltage,
+				 u16 target_voltage)
+{
+	u16 current_voltage;
+	u16 true_target_voltage;
+	u16 voltage_step;
+	int signed_voltage_step;
+
+	if ((radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					  &voltage_step)) ||
+	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					       initial_voltage, &current_voltage)) ||
+	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					       target_voltage, &true_target_voltage)))
+		return -EINVAL;
+
+	if (true_target_voltage < current_voltage)
+		signed_voltage_step = -(int)voltage_step;
+	else
+		signed_voltage_step = voltage_step;
+
+	while (current_voltage != true_target_voltage) {
+		current_voltage += signed_voltage_step;
+		rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
+						     current_voltage);
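+		/* voltage_response_time is in us; round up to whole ms */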
+		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
+	}
+
+	return 0;
+}
+
+static int rv6xx_step_voltage_if_increasing(struct radeon_device *rdev,
+					    struct radeon_ps *new_ps,
+					    struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+
+	if (new_state->low.vddc > old_state->low.vddc)
+		return rv6xx_step_sw_voltage(rdev,
+					     old_state->low.vddc,
+					     new_state->low.vddc);
+
+	return 0;
+}
+
+static int rv6xx_step_voltage_if_decreasing(struct radeon_device *rdev,
+					    struct radeon_ps *new_ps,
+					    struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+
+	if (new_state->low.vddc < old_state->low.vddc)
+		return rv6xx_step_sw_voltage(rdev,
+					     old_state->low.vddc,
+					     new_state->low.vddc);
+	else
+		return 0;
+}
+
+static void rv6xx_enable_high(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if ((pi->restricted_levels < 1) ||
+	    (pi->restricted_levels == 3))
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
+}
+
+static void rv6xx_enable_medium(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (pi->restricted_levels < 2)
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
+}
+
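+/*
+ * 'sources' is a bitmask of RADEON_DPM_AUTO_THROTTLE_SRC_* bits; map it
+ * onto the matching CG_THERMAL_CTRL event source and gate the thermal
+ * protection disable bit accordingly.
+ */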
+static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	bool want_thermal_protection;
+	enum radeon_dpm_event_src dpm_event_src;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+		break;
+
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+		break;
+
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+		if (pi->thermal_protection)
+			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	} else {
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+	}
+}
+
+static void rv6xx_enable_auto_throttle_source(struct radeon_device *rdev,
+					      enum radeon_dpm_auto_throttle_src source,
+					      bool enable)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+
+static void rv6xx_enable_thermal_protection(struct radeon_device *rdev,
+					    bool enable)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (pi->active_auto_throttle_sources)
+		r600_enable_thermal_protection(rdev, enable);
+}
+
+static void rv6xx_generate_transition_stepping(struct radeon_device *rdev,
+					       struct radeon_ps *new_ps,
+					       struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_generate_steps(rdev,
+			     old_state->low.sclk,
+			     new_state->low.sclk,
+			     0, &pi->hw.medium_sclk_index);
+}
+
+static void rv6xx_generate_low_step(struct radeon_device *rdev,
+				    struct radeon_ps *new_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.low_sclk_index = 0;
+	rv6xx_generate_single_step(rdev,
+				   new_state->low.sclk,
+				   0);
+}
+
+static void rv6xx_invalidate_intermediate_steps(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	rv6xx_invalidate_intermediate_steps_range(rdev, 0,
+						  pi->hw.medium_sclk_index);
+}
+
+static void rv6xx_generate_stepping_table(struct radeon_device *rdev,
+					  struct radeon_ps *new_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	pi->hw.low_sclk_index = 0;
+
+	rv6xx_generate_steps(rdev,
+			     new_state->low.sclk,
+			     new_state->medium.sclk,
+			     0,
+			     &pi->hw.medium_sclk_index);
+	rv6xx_generate_steps(rdev,
+			     new_state->medium.sclk,
+			     new_state->high.sclk,
+			     pi->hw.medium_sclk_index,
+			     &pi->hw.high_sclk_index);
+}
+
+static void rv6xx_enable_spread_spectrum(struct radeon_device *rdev,
+					 bool enable)
+{
+	if (enable)
+		rv6xx_enable_dynamic_spread_spectrum(rdev, true);
+	else {
+		rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_LOW, false);
+		rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
+		rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_HIGH, false);
+		rv6xx_enable_dynamic_spread_spectrum(rdev, false);
+		rv6xx_enable_memory_spread_spectrum(rdev, false);
+	}
+}
+
+static void rv6xx_reset_lvtm_data_sync(struct radeon_device *rdev)
+{
+	if (ASIC_IS_DCE3(rdev))
+		WREG32_P(DCE3_LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
+	else
+		WREG32_P(LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
+}
+
+static void rv6xx_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
+					   struct radeon_ps *new_ps,
+					   bool enable)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+
+	if (enable) {
+		rv6xx_enable_bif_dynamic_pcie_gen2(rdev, true);
+		rv6xx_enable_pcie_gen2_support(rdev);
+		r600_enable_dynamic_pcie_gen2(rdev, true);
+	} else {
+		if (!(new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
+			rv6xx_force_pcie_gen1(rdev);
+		rv6xx_enable_bif_dynamic_pcie_gen2(rdev, false);
+		r600_enable_dynamic_pcie_gen2(rdev, false);
+	}
+}
+
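+/*
+ * Reclock UVD before the engine clock change when sclk is dropping and
+ * after it when sclk is rising, so the UVD clock switch always happens
+ * while the engine is at the higher of the two clocks.
+ */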
+static void rv6xx_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+						     struct radeon_ps *new_ps,
+						     struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->high.sclk >= current_state->high.sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+static void rv6xx_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+						    struct radeon_ps *new_ps,
+						    struct radeon_ps *old_ps)
+{
+	struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
+	struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->high.sclk < current_state->high.sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+int rv6xx_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	if (r600_dynamicpm_enabled(rdev))
+		return -EINVAL;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv6xx_enable_backbias(rdev, true);
+
+	if (pi->dynamic_ss)
+		rv6xx_enable_spread_spectrum(rdev, true);
+
+	rv6xx_program_mpll_timing_parameters(rdev);
+	rv6xx_program_bsp(rdev);
+	rv6xx_program_git(rdev);
+	rv6xx_program_tp(rdev);
+	rv6xx_program_tpp(rdev);
+	rv6xx_program_sstp(rdev);
+	rv6xx_program_fcp(rdev);
+	rv6xx_program_vddc3d_parameters(rdev);
+	rv6xx_program_voltage_timing_parameters(rdev);
+	rv6xx_program_engine_speed_parameters(rdev);
+
+	rv6xx_enable_display_gap(rdev, true);
+	if (!pi->display_gap)
+		rv6xx_enable_display_gap(rdev, false);
+
+	rv6xx_program_power_level_enter_state(rdev);
+
+	rv6xx_calculate_stepping_parameters(rdev, boot_ps);
+
+	if (pi->voltage_control)
+		rv6xx_program_voltage_gpio_pins(rdev);
+
+	rv6xx_generate_stepping_table(rdev, boot_ps);
+
+	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
+	rv6xx_program_stepping_parameters_lowest_entry(rdev);
+
+	rv6xx_program_power_level_low(rdev);
+	rv6xx_program_power_level_medium(rdev);
+	rv6xx_program_power_level_high(rdev);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
+
+	rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	r600_start_dpm(rdev);
+
+	if (pi->voltage_control)
+		rv6xx_enable_static_voltage_control(rdev, boot_ps, false);
+
+	if (pi->dynamic_pcie_gen2)
+		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, true);
+
+	if (pi->gfx_clock_gating)
+		r600_gfx_clockgating_enable(rdev, true);
+
+	return 0;
+}
+
+void rv6xx_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	if (!r600_dynamicpm_enabled(rdev))
+		return;
+
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
+	rv6xx_enable_display_gap(rdev, false);
+	rv6xx_clear_vc(rdev);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+
+	if (pi->thermal_protection)
+		r600_enable_thermal_protection(rdev, false);
+
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv6xx_enable_backbias(rdev, false);
+
+	rv6xx_enable_spread_spectrum(rdev, false);
+
+	if (pi->voltage_control)
+		rv6xx_enable_static_voltage_control(rdev, boot_ps, true);
+
+	if (pi->dynamic_pcie_gen2)
+		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	if (pi->gfx_clock_gating)
+		r600_gfx_clockgating_enable(rdev, false);
+
+	r600_stop_dpm(rdev);
+}
+
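+/*
+ * State switches are staged through the power levels: the GPU is parked
+ * on LOW, MEDIUM is reprogrammed as a stepping bridge to the new
+ * state's low point, the hardware is walked LOW -> MEDIUM -> LOW, and
+ * only then are the final LOW/MEDIUM/HIGH levels programmed and
+ * re-enabled.  Voltage, backbias and pcie gen2 are held at safe values
+ * for the duration of the switch.
+ */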
+int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
+	int ret;
+
+	pi->restricted_levels = 0;
+
+	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+
+	rv6xx_clear_vc(rdev);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+
+	if (pi->thermal_protection)
+		r600_enable_thermal_protection(rdev, false);
+
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+
+	rv6xx_generate_transition_stepping(rdev, new_ps, old_ps);
+	rv6xx_program_power_level_medium_for_transition(rdev);
+
+	if (pi->voltage_control) {
+		rv6xx_set_sw_voltage_to_safe(rdev, new_ps, old_ps);
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+			rv6xx_set_sw_voltage_to_low(rdev, old_ps);
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv6xx_set_safe_backbias(rdev, new_ps, old_ps);
+
+	if (pi->dynamic_pcie_gen2)
+		rv6xx_set_safe_pcie_gen2(rdev, new_ps, old_ps);
+
+	if (pi->voltage_control)
+		rv6xx_enable_dynamic_voltage_control(rdev, false);
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv6xx_enable_dynamic_backbias_control(rdev, false);
+
+	if (pi->voltage_control) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) {
+			ret = rv6xx_step_voltage_if_increasing(rdev, new_ps, old_ps);
+			if (ret)
+				return ret;
+		}
+		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
+	}
+
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+	r600_wait_for_power_level_unequal(rdev, R600_POWER_LEVEL_LOW);
+
+	rv6xx_generate_low_step(rdev, new_ps);
+	rv6xx_invalidate_intermediate_steps(rdev);
+	rv6xx_calculate_stepping_parameters(rdev, new_ps);
+	rv6xx_program_stepping_parameters_lowest_entry(rdev);
+	rv6xx_program_power_level_low_to_lowest_state(rdev);
+
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+
+	if (pi->voltage_control) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) {
+			ret = rv6xx_step_voltage_if_decreasing(rdev, new_ps, old_ps);
+			if (ret)
+				return ret;
+		}
+		rv6xx_enable_dynamic_voltage_control(rdev, true);
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv6xx_enable_dynamic_backbias_control(rdev, true);
+
+	if (pi->dynamic_pcie_gen2)
+		rv6xx_enable_dynamic_pcie_gen2(rdev, new_ps, true);
+
+	rv6xx_reset_lvtm_data_sync(rdev);
+
+	rv6xx_generate_stepping_table(rdev, new_ps);
+	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
+	rv6xx_program_power_level_low(rdev);
+	rv6xx_program_power_level_medium(rdev);
+	rv6xx_program_power_level_high(rdev);
+	rv6xx_enable_medium(rdev);
+	rv6xx_enable_high(rdev);
+
+	if (pi->thermal_protection)
+		rv6xx_enable_thermal_protection(rdev, true);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+void rv6xx_setup_asic(struct radeon_device *rdev)
+{
+	r600_enable_acpi_pm(rdev);
+
+	if (radeon_aspm != 0) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
+			rv6xx_enable_l0s(rdev);
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
+			rv6xx_enable_l1(rdev);
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
+			rv6xx_enable_pll_sleep_in_l1(rdev);
+	}
+}
+
+void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	rv6xx_program_display_gap(rdev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void rv6xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					     struct radeon_ps *rps,
+					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (r600_is_uvd_state(rps->class, rps->class2)) {
+		rps->vclk = RV6XX_DEFAULT_VCLK_FREQ;
+		rps->dclk = RV6XX_DEFAULT_DCLK_FREQ;
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void rv6xx_parse_pplib_clock_info(struct radeon_device *rdev,
+					 struct radeon_ps *rps, int index,
+					 union pplib_clock_info *clock_info)
+{
+	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+	u32 sclk, mclk;
+	u16 vddc;
+	struct rv6xx_pl *pl;
+
+	switch (index) {
+	case 0:
+		pl = &ps->low;
+		break;
+	case 1:
+		pl = &ps->medium;
+		break;
+	case 2:
+	default:
+		pl = &ps->high;
+		break;
+	}
+
+	sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+	sclk |= clock_info->r600.ucEngineClockHigh << 16;
+	mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+	mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+
+	pl->mclk = mclk;
+	pl->sclk = sclk;
+	pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
+	pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
+
+	/* patch up vddc if necessary */
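+	/* 0xff01 is the vbios sentinel for "use the maximum VDDC" */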
+	if (pl->vddc == 0xff01) {
+		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
+			pl->vddc = vddc;
+	}
+
+	/* fix up pcie gen2 */
+	if (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) {
+		if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV630)) {
+			if (pl->vddc < 1100)
+				pl->flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
+		}
+	}
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+		pl->mclk = rdev->clock.default_mclk;
+		pl->sclk = rdev->clock.default_sclk;
+		pl->vddc = vddc;
+	}
+}
+
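+/*
+ * Each pplib state is a non-clock-info block plus ucStateEntrySize - 1
+ * clock-info indices; every index selects an entry in the shared
+ * clock-info array, mapped here onto the low/medium/high levels.
+ */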
+static int rv6xx_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	union pplib_clock_info *clock_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	struct rv6xx_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			u8 *idx;
+			ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
+			if (ps == NULL) {
+				/* free the ps_priv of the states parsed so far */
+				for (j = 0; j < i; j++)
+					kfree(rdev->pm.dpm.ps[j].ps_priv);
+				kfree(rdev->pm.dpm.ps);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.ps[i].ps_priv = ps;
+			rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+							 non_clock_info);
+			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (idx[j] * power_info->pplib.ucClockInfoSize));
+				rv6xx_parse_pplib_clock_info(rdev,
+							     &rdev->pm.dpm.ps[i], j,
+							     clock_info);
+			}
+		}
+	}
+	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
+	return 0;
+}
+
+int rv6xx_dpm_init(struct radeon_device *rdev)
+{
+	struct radeon_atom_ss ss;
+	struct atom_clock_dividers dividers;
+	struct rv6xx_power_info *pi;
+	int ret;
+
+	pi = kzalloc(sizeof(struct rv6xx_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = rv6xx_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->spll_ref_div = R600_REFERENCEDIVIDER_DFLT;
+	else
+		pi->spll_ref_div = dividers.ref_div + 1;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->mpll_ref_div = R600_REFERENCEDIVIDER_DFLT;
+	else
+		pi->mpll_ref_div = dividers.ref_div + 1;
+
+	if (rdev->family >= CHIP_RV670)
+		pi->fb_div_scale = 1;
+	else
+		pi->fb_div_scale = 0;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
+
+	pi->gfx_clock_gating = true;
+
+	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	/* Disable sclk ss, causes hangs on a lot of systems */
+	pi->sclk_ss = false;
+
+	if (pi->sclk_ss || pi->mclk_ss)
+		pi->dynamic_ss = true;
+	else
+		pi->dynamic_ss = false;
+
+	pi->dynamic_pcie_gen2 = true;
+
+	if (pi->gfx_clock_gating &&
+	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	pi->display_gap = true;
+
+	return 0;
+}
+
+void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+	struct rv6xx_pl *pl;
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	pl = &ps->low;
+	printk("\t\tpower level 0    sclk: %u mclk: %u vddc: %u\n",
+	       pl->sclk, pl->mclk, pl->vddc);
+	pl = &ps->medium;
+	printk("\t\tpower level 1    sclk: %u mclk: %u vddc: %u\n",
+	       pl->sclk, pl->mclk, pl->vddc);
+	pl = &ps->high;
+	printk("\t\tpower level 2    sclk: %u mclk: %u vddc: %u\n",
+	       pl->sclk, pl->mclk, pl->vddc);
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+	struct rv6xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc);
+	}
+}
+
+/* get the current sclk in 10 khz units */
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+	struct rv6xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->sclk;
+	}
+}
+
+/* get the current mclk in 10 khz units */
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+	struct rv6xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->mclk;
+	}
+}
+
+void rv6xx_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+}
+
+u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
+
+	if (low)
+		return requested_state->low.sclk;
+	else
+		return requested_state->high.sclk;
+}
+
+u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
+
+	if (low)
+		return requested_state->low.mclk;
+	else
+		return requested_state->high.mclk;
+}
+
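+/*
+ * restricted_levels encodes the forced level: 0 leaves all three levels
+ * enabled, 2 restricts the hardware to LOW only, and 3 to HIGH only
+ * (see rv6xx_enable_medium()/rv6xx_enable_high()).
+ */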
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		pi->restricted_levels = 3;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		pi->restricted_levels = 2;
+	} else {
+		pi->restricted_levels = 0;
+	}
+
+	rv6xx_clear_vc(rdev);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+	rv6xx_enable_medium(rdev);
+	rv6xx_enable_high(rdev);
+	if (pi->restricted_levels == 3)
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h
new file mode 100644
index 0000000..8035d53
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#ifndef __RV6XX_DPM_H__
+#define __RV6XX_DPM_H__
+
+#include "r600_dpm.h"
+
+/* Represents a single SCLK step. */
+struct rv6xx_sclk_stepping {
+	u32 vco_frequency;
+	u32 post_divider;
+};
+
+struct rv6xx_pm_hw_state {
+	u32 sclks[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
+	u32 mclks[R600_PM_NUMBER_OF_MCLKS];
+	u16 vddc[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
+	bool backbias[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
+	bool pcie_gen2[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
+	u8 high_sclk_index;
+	u8 medium_sclk_index;
+	u8 low_sclk_index;
+	u8 high_mclk_index;
+	u8 medium_mclk_index;
+	u8 low_mclk_index;
+	u8 high_vddc_index;
+	u8 medium_vddc_index;
+	u8 low_vddc_index;
+	u8 rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
+	u8 lp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
+};
+
+struct rv6xx_power_info {
+	/* flags */
+	bool voltage_control;
+	bool sclk_ss;
+	bool mclk_ss;
+	bool dynamic_ss;
+	bool dynamic_pcie_gen2;
+	bool thermal_protection;
+	bool display_gap;
+	bool gfx_clock_gating;
+	/* clk values */
+	u32 fb_div_scale;
+	u32 spll_ref_div;
+	u32 mpll_ref_div;
+	u32 bsu;
+	u32 bsp;
+	/* */
+	u32 active_auto_throttle_sources;
+	/* current power state */
+	u32 restricted_levels;
+	struct rv6xx_pm_hw_state hw;
+};
+
+struct rv6xx_pl {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u32 flags;
+};
+
+struct rv6xx_ps {
+	struct rv6xx_pl high;
+	struct rv6xx_pl medium;
+	struct rv6xx_pl low;
+};
+
+#define RV6XX_DEFAULT_VCLK_FREQ  40000 /* 400 mhz, in 10 khz units */
+#define RV6XX_DEFAULT_DCLK_FREQ  30000 /* 300 mhz, in 10 khz units */
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv6xxd.h b/drivers/gpu/drm/radeon/rv6xxd.h
new file mode 100644
index 0000000..34e86f9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xxd.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef RV6XXD_H
+#define RV6XXD_H
+
+/* RV6xx power management */
+#define SPLL_CNTL_MODE                                    0x60c
+#       define SPLL_DIV_SYNC                              (1 << 5)
+
+#define GENERAL_PWRMGT                                    0x618
+#       define GLOBAL_PWRMGT_EN                           (1 << 0)
+#       define STATIC_PM_EN                               (1 << 1)
+#       define MOBILE_SU                                  (1 << 2)
+#       define THERMAL_PROTECTION_DIS                     (1 << 3)
+#       define THERMAL_PROTECTION_TYPE                    (1 << 4)
+#       define ENABLE_GEN2PCIE                            (1 << 5)
+#       define SW_GPIO_INDEX(x)                           ((x) << 6)
+#       define SW_GPIO_INDEX_MASK                         (3 << 6)
+#       define LOW_VOLT_D2_ACPI                           (1 << 8)
+#       define LOW_VOLT_D3_ACPI                           (1 << 9)
+#       define VOLT_PWRMGT_EN                             (1 << 10)
+#       define BACKBIAS_PAD_EN                            (1 << 16)
+#       define BACKBIAS_VALUE                             (1 << 17)
+#       define BACKBIAS_DPM_CNTL                          (1 << 18)
+#       define DYN_SPREAD_SPECTRUM_EN                     (1 << 21)
+
+#define MCLK_PWRMGT_CNTL                                  0x624
+#       define MPLL_PWRMGT_OFF                            (1 << 0)
+#       define YCLK_TURNOFF                               (1 << 1)
+#       define MPLL_TURNOFF                               (1 << 2)
+#       define SU_MCLK_USE_BCLK                           (1 << 3)
+#       define DLL_READY                                  (1 << 4)
+#       define MC_BUSY                                    (1 << 5)
+#       define MC_INT_CNTL                                (1 << 7)
+#       define MRDCKA_SLEEP                               (1 << 8)
+#       define MRDCKB_SLEEP                               (1 << 9)
+#       define MRDCKC_SLEEP                               (1 << 10)
+#       define MRDCKD_SLEEP                               (1 << 11)
+#       define MRDCKE_SLEEP                               (1 << 12)
+#       define MRDCKF_SLEEP                               (1 << 13)
+#       define MRDCKG_SLEEP                               (1 << 14)
+#       define MRDCKH_SLEEP                               (1 << 15)
+#       define MRDCKA_RESET                               (1 << 16)
+#       define MRDCKB_RESET                               (1 << 17)
+#       define MRDCKC_RESET                               (1 << 18)
+#       define MRDCKD_RESET                               (1 << 19)
+#       define MRDCKE_RESET                               (1 << 20)
+#       define MRDCKF_RESET                               (1 << 21)
+#       define MRDCKG_RESET                               (1 << 22)
+#       define MRDCKH_RESET                               (1 << 23)
+#       define DLL_READY_READ                             (1 << 24)
+#       define USE_DISPLAY_GAP                            (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                  (1 << 26)
+#       define USE_DISPLAY_GAP_CTXSW                      (1 << 27)
+#       define MPLL_TURNOFF_D2                            (1 << 28)
+#       define USE_DISPLAY_URGENT_CTXSW                   (1 << 29)
+
+#define MPLL_FREQ_LEVEL_0                                 0x6e8
+#       define LEVEL0_MPLL_POST_DIV(x)                    ((x) << 0)
+#       define LEVEL0_MPLL_POST_DIV_MASK                  (0xff << 0)
+#       define LEVEL0_MPLL_FB_DIV(x)                      ((x) << 8)
+#       define LEVEL0_MPLL_FB_DIV_MASK                    (0xfff << 8)
+#       define LEVEL0_MPLL_REF_DIV(x)                     ((x) << 20)
+#       define LEVEL0_MPLL_REF_DIV_MASK                   (0x3f << 20)
+#       define LEVEL0_MPLL_DIV_EN                         (1 << 28)
+#       define LEVEL0_DLL_BYPASS                          (1 << 29)
+#       define LEVEL0_DLL_RESET                           (1 << 30)
+
+#define VID_RT                                            0x6f8
+#       define VID_CRT(x)                                 ((x) << 0)
+#       define VID_CRT_MASK                               (0x1fff << 0)
+#       define VID_CRTU(x)                                ((x) << 13)
+#       define VID_CRTU_MASK                              (7 << 13)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (7 << 16)
+#       define VID_SWT(x)                                 ((x) << 19)
+#       define VID_SWT_MASK                               (0x1f << 19)
+#       define BRT(x)                                     ((x) << 24)
+#       define BRT_MASK                                   (0xff << 24)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x70c
+#       define TARGET_PROFILE_INDEX_MASK                  (3 << 0)
+#       define TARGET_PROFILE_INDEX_SHIFT                 0
+#       define CURRENT_PROFILE_INDEX_MASK                 (3 << 2)
+#       define CURRENT_PROFILE_INDEX_SHIFT                2
+#       define DYN_PWR_ENTER_INDEX(x)                     ((x) << 4)
+#       define DYN_PWR_ENTER_INDEX_MASK                   (3 << 4)
+#       define DYN_PWR_ENTER_INDEX_SHIFT                  4
+#       define CURR_MCLK_INDEX_MASK                       (3 << 6)
+#       define CURR_MCLK_INDEX_SHIFT                      6
+#       define CURR_SCLK_INDEX_MASK                       (0x1f << 8)
+#       define CURR_SCLK_INDEX_SHIFT                      8
+#       define CURR_VID_INDEX_MASK                        (3 << 13)
+#       define CURR_VID_INDEX_SHIFT                       13
+
+#define VID_UPPER_GPIO_CNTL                               0x740
+#       define CTXSW_UPPER_GPIO_VALUES(x)                 ((x) << 0)
+#       define CTXSW_UPPER_GPIO_VALUES_MASK               (7 << 0)
+#       define HIGH_UPPER_GPIO_VALUES(x)                  ((x) << 3)
+#       define HIGH_UPPER_GPIO_VALUES_MASK                (7 << 3)
+#       define MEDIUM_UPPER_GPIO_VALUES(x)                ((x) << 6)
+#       define MEDIUM_UPPER_GPIO_VALUES_MASK              (7 << 6)
+#       define LOW_UPPER_GPIO_VALUES(x)                   ((x) << 9)
+#       define LOW_UPPER_GPIO_VALUES_MASK                 (7 << 9)
+#       define CTXSW_BACKBIAS_VALUE                       (1 << 12)
+#       define HIGH_BACKBIAS_VALUE                        (1 << 13)
+#       define MEDIUM_BACKBIAS_VALUE                      (1 << 14)
+#       define LOW_BACKBIAS_VALUE                         (1 << 15)
+
+#define CG_DISPLAY_GAP_CNTL                               0x7dc
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define CG_THERMAL_CTRL                                   0x7f0
+#       define DPM_EVENT_SRC(x)                           ((x) << 0)
+#       define DPM_EVENT_SRC_MASK                         (7 << 0)
+#       define THERM_INC_CLK                              (1 << 3)
+#       define TOFFSET(x)                                 ((x) << 4)
+#       define TOFFSET_MASK                               (0xff << 4)
+#       define DIG_THERM_DPM(x)                           ((x) << 12)
+#       define DIG_THERM_DPM_MASK                         (0xff << 12)
+#       define CTF_SEL(x)                                 ((x) << 20)
+#       define CTF_SEL_MASK                               (7 << 20)
+#       define CTF_PAD_POLARITY                           (1 << 23)
+#       define CTF_PAD_EN                                 (1 << 24)
+
+#define CG_SPLL_SPREAD_SPECTRUM_LOW                       0x820
+#       define SSEN                                       (1 << 0)
+#       define CLKS(x)                                    ((x) << 3)
+#       define CLKS_MASK                                  (0xff << 3)
+#       define CLKS_SHIFT                                 3
+#       define CLKV(x)                                    ((x) << 11)
+#       define CLKV_MASK                                  (0x7ff << 11)
+#       define CLKV_SHIFT                                 11
+#define CG_MPLL_SPREAD_SPECTRUM                           0x830
+
+#define CITF_CNTL					0x200c
+#       define BLACKOUT_RD                              (1 << 0)
+#       define BLACKOUT_WR                              (1 << 1)
+
+#define RAMCFG						0x2408
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000001
+#define		NOOFRANK_SHIFT					1
+#define		NOOFRANK_MASK					0x00000002
+#define		NOOFROWS_SHIFT					2
+#define		NOOFROWS_MASK					0x0000001C
+#define		NOOFCOLS_SHIFT					5
+#define		NOOFCOLS_MASK					0x00000060
+#define		CHANSIZE_SHIFT					7
+#define		CHANSIZE_MASK					0x00000080
+#define		BURSTLENGTH_SHIFT				8
+#define		BURSTLENGTH_MASK				0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 10)
+
+#define SQM_RATIO					0x2424
+#       define STATE0(x)                                ((x) << 0)
+#       define STATE0_MASK                              (0xff << 0)
+#       define STATE1(x)                                ((x) << 8)
+#       define STATE1_MASK                              (0xff << 8)
+#       define STATE2(x)                                ((x) << 16)
+#       define STATE2_MASK                              (0xff << 16)
+#       define STATE3(x)                                ((x) << 24)
+#       define STATE3_MASK                              (0xff << 24)
+
+#define ARB_RFSH_CNTL					0x2460
+#       define ENABLE                                   (1 << 0)
+#define ARB_RFSH_RATE					0x2464
+#       define POWERMODE0(x)                            ((x) << 0)
+#       define POWERMODE0_MASK                          (0xff << 0)
+#       define POWERMODE1(x)                            ((x) << 8)
+#       define POWERMODE1_MASK                          (0xff << 8)
+#       define POWERMODE2(x)                            ((x) << 16)
+#       define POWERMODE2_MASK                          (0xff << 16)
+#       define POWERMODE3(x)                            ((x) << 24)
+#       define POWERMODE3_MASK                          (0xff << 24)
+
+#define MC_SEQ_DRAM					0x2608
+#       define CKE_DYN                                  (1 << 12)
+
+#define MC_SEQ_CMD					0x26c4
+
+#define MC_SEQ_RESERVE_S				0x2890
+#define MC_SEQ_RESERVE_M				0x2894
+
+#define LVTMA_DATA_SYNCHRONIZATION                      0x7adc
+#       define LVTMA_PFREQCHG                           (1 << 8)
+#define DCE3_LVTMA_DATA_SYNCHRONIZATION                 0x7f98
+
+/* PCIE indirect regs */
+#define PCIE_P_CNTL                                       0x40
+#       define P_PLL_PWRDN_IN_L1L23                       (1 << 3)
+#       define P_PLL_BUF_PDNB                             (1 << 4)
+#       define P_PLL_PDNB                                 (1 << 9)
+#       define P_ALLOW_PRX_FRONTEND_SHUTOFF               (1 << 12)
+/* PCIE PORT indirect regs */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4
+#       define LC_GEN2_EN                                 (1 << 0)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 7)
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
new file mode 100644
index 0000000..38fdb41
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "rv730d.h"
+#include "r600_dpm.h"
+#include "rv770_dpm.h"
+#include "atom.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+
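+/*
+ * The SPLL feedback divider is programmed in 1/16384 fractional units:
+ *   fbdiv = engine_clock * ref_div * post_div * 16384 / ref_clk
+ * with dithering (SPLL_DITHEN) realizing the fractional part.
+ */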
+int rv730_populate_sclk_value(struct radeon_device *rdev,
+			      u32 engine_clock,
+			      RV770_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum = pi->clk_regs.rv730.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv730.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider, post_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
+	if (dividers.enable_post_div)
+		post_divider = ((dividers.post_div >> 4) & 0xf) +
+			(dividers.post_div & 0xf) + 2;
+	else
+		post_divider = 1;
+
+	tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	/* set up registers */
+	if (dividers.enable_post_div)
+		spll_func_cntl |= SPLL_DIVEN;
+	else
+		spll_func_cntl &= ~SPLL_DIVEN;
+	spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
+	spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
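+	/*
+	 * Spread spectrum setup: clk_s is the modulation rate derived
+	 * from the reference clock, clk_v the modulation depth scaled
+	 * by the feedback divider and the ss percentage.
+	 */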
+	if (pi->sclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * post_divider;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = cpu_to_be32(engine_clock);
+	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
+
+	return 0;
+}
+
+int rv730_populate_mclk_value(struct radeon_device *rdev,
+			      u32 engine_clock, u32 memory_clock,
+			      LPRV7XX_SMC_MCLK_VALUE mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv730.mclk_pwrmgt_cntl;
+	u32 dll_cntl = pi->clk_regs.rv730.dll_cntl;
+	u32 mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
+	u32 mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
+	u32 mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;
+	u32 mpll_ss = pi->clk_regs.rv730.mpll_ss;
+	u32 mpll_ss2 = pi->clk_regs.rv730.mpll_ss2;
+	struct atom_clock_dividers dividers;
+	u32 post_divider, reference_divider;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     memory_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = dividers.ref_div + 1;
+
+	if (dividers.enable_post_div)
+		post_divider = ((dividers.post_div >> 4) & 0xf) +
+			(dividers.post_div & 0xf) + 2;
+	else
+		post_divider = 1;
+
+	/* setup the registers */
+	if (dividers.enable_post_div)
+		mpll_func_cntl |= MPLL_DIVEN;
+	else
+		mpll_func_cntl &= ~MPLL_DIVEN;
+
+	mpll_func_cntl &= ~(MPLL_REF_DIV_MASK | MPLL_HILEN_MASK | MPLL_LOLEN_MASK);
+	mpll_func_cntl |= MPLL_REF_DIV(dividers.ref_div);
+	mpll_func_cntl |= MPLL_HILEN((dividers.post_div >> 4) & 0xf);
+	mpll_func_cntl |= MPLL_LOLEN(dividers.post_div & 0xf);
+
+	mpll_func_cntl_3 &= ~MPLL_FB_DIV_MASK;
+	mpll_func_cntl_3 |= MPLL_FB_DIV(dividers.fb_div);
+	if (dividers.enable_dithen)
+		mpll_func_cntl_3 |= MPLL_DITHEN;
+	else
+		mpll_func_cntl_3 &= ~MPLL_DITHEN;
+
+	if (pi->mclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = memory_clock * post_divider;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+			u32 reference_clock = rdev->clock.mpll.reference_freq;
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = ss.percentage * dividers.fb_div / (clk_s * 10000);
+
+			mpll_ss &= ~CLK_S_MASK;
+			mpll_ss |= CLK_S(clk_s);
+			mpll_ss |= SSEN;
+
+			mpll_ss2 &= ~CLK_V_MASK;
+			mpll_ss2 |= CLK_V(clk_v);
+		}
+	}
+	}
+
+	mclk->mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->mclk730.mclk_value = cpu_to_be32(memory_clock);
+	mclk->mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+	mclk->mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
+	mclk->mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
+	mclk->mclk730.vMPLL_SS = cpu_to_be32(mpll_ss);
+	mclk->mclk730.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+void rv730_read_clock_registers(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	pi->clk_regs.rv730.cg_spll_func_cntl =
+		RREG32(CG_SPLL_FUNC_CNTL);
+	pi->clk_regs.rv730.cg_spll_func_cntl_2 =
+		RREG32(CG_SPLL_FUNC_CNTL_2);
+	pi->clk_regs.rv730.cg_spll_func_cntl_3 =
+		RREG32(CG_SPLL_FUNC_CNTL_3);
+	pi->clk_regs.rv730.cg_spll_spread_spectrum =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	pi->clk_regs.rv730.cg_spll_spread_spectrum_2 =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+
+	pi->clk_regs.rv730.mclk_pwrmgt_cntl =
+		RREG32(TCI_MCLK_PWRMGT_CNTL);
+	pi->clk_regs.rv730.dll_cntl =
+		RREG32(TCI_DLL_CNTL);
+	pi->clk_regs.rv730.mpll_func_cntl =
+		RREG32(CG_MPLL_FUNC_CNTL);
+	pi->clk_regs.rv730.mpll_func_cntl2 =
+		RREG32(CG_MPLL_FUNC_CNTL_2);
+	pi->clk_regs.rv730.mpll_func_cntl3 =
+		RREG32(CG_MPLL_FUNC_CNTL_3);
+	pi->clk_regs.rv730.mpll_ss =
+		RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM);
+	pi->clk_regs.rv730.mpll_ss2 =
+		RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM_2);
+}
+
+int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
+				  RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 mpll_func_cntl = 0;
+	u32 mpll_func_cntl_2 = 0;
+	u32 mpll_func_cntl_3 = 0;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 spll_func_cntl;
+	u32 spll_func_cntl_2;
+	u32 spll_func_cntl_3;
+
+	table->ACPIState = table->initialState;
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
+					  &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ?
+			pi->acpi_pcie_gen2 : 0;
+		table->ACPIState.levels[0].gen2XSP =
+			pi->acpi_pcie_gen2;
+	} else {
+		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
+					  &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE = 0;
+	}
+
+	mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
+	mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
+	mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;
+
+	mpll_func_cntl |= MPLL_RESET | MPLL_BYPASS_EN;
+	mpll_func_cntl &= ~MPLL_SLEEP;
+
+	mpll_func_cntl_2 &= ~MCLK_MUX_SEL_MASK;
+	mpll_func_cntl_2 |= MCLK_MUX_SEL(1);
+
+	mclk_pwrmgt_cntl = (MRDCKA_RESET |
+			    MRDCKB_RESET |
+			    MRDCKC_RESET |
+			    MRDCKD_RESET |
+			    MRDCKE_RESET |
+			    MRDCKF_RESET |
+			    MRDCKG_RESET |
+			    MRDCKH_RESET |
+			    MRDCKA_SLEEP |
+			    MRDCKB_SLEEP |
+			    MRDCKC_SLEEP |
+			    MRDCKD_SLEEP |
+			    MRDCKE_SLEEP |
+			    MRDCKF_SLEEP |
+			    MRDCKG_SLEEP |
+			    MRDCKH_SLEEP);
+
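+	/* the top byte of TCI_DLL_CNTL presumably holds the MRDCK bypass
+	 * bits, mirroring the rv770 DLL_CNTL layout
+	 */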
+	dll_cntl = 0xff000000;
+
+	spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
+	spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
+	spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;
+
+	spll_func_cntl |= SPLL_RESET | SPLL_BYPASS_EN;
+	spll_func_cntl &= ~SPLL_SLEEP;
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
+	table->ACPIState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	table->ACPIState.levels[0].mclk.mclk730.mclk_value = 0;
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	table->ACPIState.levels[1] = table->ACPIState.levels[0];
+	table->ACPIState.levels[2] = table->ACPIState.levels[0];
+
+	return 0;
+}
+
+int rv730_populate_smc_initial_state(struct radeon_device *rdev,
+				     struct radeon_ps *radeon_state,
+				     RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 a_t;
+
+	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl);
+	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 =
+		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl2);
+	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 =
+		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl3);
+	table->initialState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(pi->clk_regs.rv730.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.mclk730.vDLL_CNTL =
+		cpu_to_be32(pi->clk_regs.rv730.dll_cntl);
+	table->initialState.levels[0].mclk.mclk730.vMPLL_SS =
+		cpu_to_be32(pi->clk_regs.rv730.mpll_ss);
+	table->initialState.levels[0].mclk.mclk730.vMPLL_SS2 =
+		cpu_to_be32(pi->clk_regs.rv730.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk730.mclk_value =
+		cpu_to_be32(initial_state->low.mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->low.sclk);
+
+	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
+
+	table->initialState.levels[0].seqValue =
+		rv770_get_seq_value(rdev, &initial_state->low);
+
+	rv770_populate_vddc_value(rdev,
+				  initial_state->low.vddc,
+				  &table->initialState.levels[0].vddc);
+	rv770_populate_initial_mvdd_value(rdev,
+					  &table->initialState.levels[0].mvdd);
+
+	a_t = CG_R(0xffff) | CG_L(0);
+
+	table->initialState.levels[0].aT = cpu_to_be32(a_t);
+
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+
+	if (pi->boot_in_gen2)
+		table->initialState.levels[0].gen2PCIE = 1;
+	else
+		table->initialState.levels[0].gen2PCIE = 0;
+	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+		table->initialState.levels[0].gen2XSP = 1;
+	else
+		table->initialState.levels[0].gen2XSP = 0;
+
+	table->initialState.levels[1] = table->initialState.levels[0];
+	table->initialState.levels[2] = table->initialState.levels[0];
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	return 0;
+}
+
+void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
+					    struct radeon_ps *radeon_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	u32 arb_refresh_rate;
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 old_dram_timing;
+	u32 old_dram_timing2;
+
+	arb_refresh_rate = RREG32(MC_ARB_RFSH_RATE) &
+		~(POWERMODE1_MASK | POWERMODE2_MASK | POWERMODE3_MASK);
+	arb_refresh_rate |=
+		(POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
+		 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
+		 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk)));
+	WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
+
+	/* save the boot dram timings */
+	old_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	old_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+
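+	/* program one arbiter timing set per performance level:
+	 * set 3 <- high, set 2 <- medium, set 1 <- low
+	 */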
+	radeon_atom_set_engine_dram_timings(rdev,
+					    state->high.sclk,
+					    state->high.mclk);
+
+	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+
+	WREG32(MC_ARB_DRAM_TIMING_3, dram_timing);
+	WREG32(MC_ARB_DRAM_TIMING2_3, dram_timing2);
+
+	radeon_atom_set_engine_dram_timings(rdev,
+					    state->medium.sclk,
+					    state->medium.mclk);
+
+	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+
+	WREG32(MC_ARB_DRAM_TIMING_2, dram_timing);
+	WREG32(MC_ARB_DRAM_TIMING2_2, dram_timing2);
+
+	radeon_atom_set_engine_dram_timings(rdev,
+					    state->low.sclk,
+					    state->low.mclk);
+
+	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+
+	WREG32(MC_ARB_DRAM_TIMING_1, dram_timing);
+	WREG32(MC_ARB_DRAM_TIMING2_1, dram_timing2);
+
+	/* restore the boot dram timings */
+	WREG32(MC_ARB_DRAM_TIMING, old_dram_timing);
+	WREG32(MC_ARB_DRAM_TIMING2, old_dram_timing2);
+}
+
+void rv730_start_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+
+	WREG32_P(TCI_MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
+
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+void rv730_stop_dpm(struct radeon_device *rdev)
+{
+	PPSMC_Result result;
+
+	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
+
+	if (result != PPSMC_Result_OK)
+		DRM_DEBUG("Could not force DPM to low\n");
+
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+
+	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+
+	WREG32_P(TCI_MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
+}
+
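+/* DCODT: select the on-die-termination value for the MC4 DQ/QS pads;
+ * index 0 holds the dcodt-enabled values, index 1 the boot defaults
+ * captured by rv730_get_odt_values()
+ */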
+void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 i = use_dcodt ? 0 : 1;
+	u32 mc4_io_pad_cntl;
+
+	mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
+	mc4_io_pad_cntl &= 0xFFFFFF00;
+	mc4_io_pad_cntl |= pi->odt_value_0[i];
+	WREG32(MC4_IO_DQ_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
+	WREG32(MC4_IO_DQ_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
+
+	mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
+	mc4_io_pad_cntl &= 0xFFFFFF00;
+	mc4_io_pad_cntl |= pi->odt_value_1[i];
+	WREG32(MC4_IO_QS_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
+	WREG32(MC4_IO_QS_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
+}
+
+void rv730_get_odt_values(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 mc4_io_pad_cntl;
+
+	pi->odt_value_0[0] = (u8)0;
+	pi->odt_value_1[0] = (u8)0x80;
+
+	mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
+	pi->odt_value_0[1] = (u8)(mc4_io_pad_cntl & 0xff);
+
+	mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
+	pi->odt_value_1[1] = (u8)(mc4_io_pad_cntl & 0xff);
+}
diff --git a/drivers/gpu/drm/radeon/rv730d.h b/drivers/gpu/drm/radeon/rv730d.h
new file mode 100644
index 0000000..f0a7954
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730d.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef RV730_H
+#define RV730_H
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_DIVEN				(1 << 2)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_HILEN(x)				((x) << 12)
+#define		SPLL_HILEN_MASK				(0xf << 12)
+#define		SPLL_LOLEN(x)				((x) << 16)
+#define		SPLL_LOLEN_MASK				(0xf << 16)
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_DITHEN				(1 << 28)
+
+#define	CG_MPLL_FUNC_CNTL				0x624
+#define		MPLL_RESET				(1 << 0)
+#define		MPLL_SLEEP				(1 << 1)
+#define		MPLL_DIVEN				(1 << 2)
+#define		MPLL_BYPASS_EN				(1 << 3)
+#define		MPLL_REF_DIV(x)				((x) << 4)
+#define		MPLL_REF_DIV_MASK			(0x3f << 4)
+#define		MPLL_HILEN(x)				((x) << 12)
+#define		MPLL_HILEN_MASK				(0xf << 12)
+#define		MPLL_LOLEN(x)				((x) << 16)
+#define		MPLL_LOLEN_MASK				(0xf << 16)
+#define	CG_MPLL_FUNC_CNTL_2				0x628
+#define		MCLK_MUX_SEL(x)				((x) << 0)
+#define		MCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define	CG_MPLL_FUNC_CNTL_3				0x62c
+#define		MPLL_FB_DIV(x)				((x) << 0)
+#define		MPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		MPLL_DITHEN				(1 << 28)
+
+#define	CG_TCI_MPLL_SPREAD_SPECTRUM			0x634
+#define	CG_TCI_MPLL_SPREAD_SPECTRUM_2			0x638
+#define GENERAL_PWRMGT                                  0x63c
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define ENABLE_GEN2PCIE                          (1 << 4)
+#       define ENABLE_GEN2XSP                           (1 << 5)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (3 << 6)
+#       define LOW_VOLT_D2_ACPI                         (1 << 8)
+#       define LOW_VOLT_D3_ACPI                         (1 << 9)
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define BACKBIAS_PAD_EN                          (1 << 18)
+#       define BACKBIAS_VALUE                           (1 << 19)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#       define AC_DC_SW                                 (1 << 24)
+
+#define SCLK_PWRMGT_CNTL                                  0x644
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+
+#define	TCI_MCLK_PWRMGT_CNTL				0x648
+#       define MPLL_PWRMGT_OFF                          (1 << 5)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCKA_SLEEP                             (1 << 8)
+#       define MRDCKB_SLEEP                             (1 << 9)
+#       define MRDCKC_SLEEP                             (1 << 10)
+#       define MRDCKD_SLEEP                             (1 << 11)
+#       define MRDCKE_SLEEP                             (1 << 12)
+#       define MRDCKF_SLEEP                             (1 << 13)
+#       define MRDCKG_SLEEP                             (1 << 14)
+#       define MRDCKH_SLEEP                             (1 << 15)
+#       define MRDCKA_RESET                             (1 << 16)
+#       define MRDCKB_RESET                             (1 << 17)
+#       define MRDCKC_RESET                             (1 << 18)
+#       define MRDCKD_RESET                             (1 << 19)
+#       define MRDCKE_RESET                             (1 << 20)
+#       define MRDCKF_RESET                             (1 << 21)
+#       define MRDCKG_RESET                             (1 << 22)
+#       define MRDCKH_RESET                             (1 << 23)
+#       define DLL_READY_READ                           (1 << 24)
+#       define USE_DISPLAY_GAP                          (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                (1 << 26)
+#       define MPLL_TURNOFF_D2                          (1 << 28)
+#define	TCI_DLL_CNTL					0x64c
+
+#define	CG_PG_CNTL					0x858
+#       define PWRGATE_ENABLE                           (1 << 0)
+
+#define	CG_AT				                0x6d4
+#define		CG_R(x)					((x) << 0)
+#define		CG_R_MASK				(0xffff << 0)
+#define		CG_L(x)					((x) << 16)
+#define		CG_L_MASK				(0xffff << 16)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x790
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+
+#define	MC_ARB_DRAM_TIMING				0x2774
+#define	MC_ARB_DRAM_TIMING2				0x2778
+
+#define	MC_ARB_RFSH_RATE				0x27b0
+#define		POWERMODE0(x)				((x) << 0)
+#define		POWERMODE0_MASK				(0xff << 0)
+#define		POWERMODE1(x)				((x) << 8)
+#define		POWERMODE1_MASK				(0xff << 8)
+#define		POWERMODE2(x)				((x) << 16)
+#define		POWERMODE2_MASK				(0xff << 16)
+#define		POWERMODE3(x)				((x) << 24)
+#define		POWERMODE3_MASK				(0xff << 24)
+
+#define	MC_ARB_DRAM_TIMING_1				0x27f0
+#define	MC_ARB_DRAM_TIMING_2				0x27f4
+#define	MC_ARB_DRAM_TIMING_3				0x27f8
+#define	MC_ARB_DRAM_TIMING2_1				0x27fc
+#define	MC_ARB_DRAM_TIMING2_2				0x2800
+#define	MC_ARB_DRAM_TIMING2_3				0x2804
+
+#define	MC4_IO_DQ_PAD_CNTL_D0_I0			0x2978
+#define	MC4_IO_DQ_PAD_CNTL_D0_I1			0x297c
+#define	MC4_IO_QS_PAD_CNTL_D0_I0			0x2980
+#define	MC4_IO_QS_PAD_CNTL_D0_I1			0x2984
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
new file mode 100644
index 0000000..afd597e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "rv740d.h"
+#include "r600_dpm.h"
+#include "rv770_dpm.h"
+#include "atom.h"
+
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+
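+/* decode the ATOM-encoded PLL reference divider into the real divide ratio */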
+u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
+{
+	u32 ref = 0;
+
+	switch (encoded_ref) {
+	case 0:
+		ref = 1;
+		break;
+	case 16:
+		ref = 2;
+		break;
+	case 17:
+		ref = 3;
+		break;
+	case 18:
+		ref = 2;
+		break;
+	case 19:
+		ref = 3;
+		break;
+	case 20:
+		ref = 4;
+		break;
+	case 21:
+		ref = 5;
+		break;
+	default:
+		DRM_ERROR("Invalid encoded Reference Divider\n");
+		ref = 0;
+		break;
+	}
+
+	return ref;
+}
+
+struct dll_speed_setting {
+	u16 min;
+	u16 max;
+	u32 dll_speed;
+};
+
+static const struct dll_speed_setting dll_speed_table[16] =
+{
+	{ 270, 320, 0x0f },
+	{ 240, 270, 0x0e },
+	{ 200, 240, 0x0d },
+	{ 180, 200, 0x0c },
+	{ 160, 180, 0x0b },
+	{ 140, 160, 0x0a },
+	{ 120, 140, 0x09 },
+	{ 110, 120, 0x08 },
+	{  95, 110, 0x07 },
+	{  85,  95, 0x06 },
+	{  78,  85, 0x05 },
+	{  70,  78, 0x04 },
+	{  65,  70, 0x03 },
+	{  60,  65, 0x02 },
+	{  42,  60, 0x01 },
+	{   0,  42, 0x00 }
+};
+
+u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock)
+{
+	int i;
+	u32 factor;
+	u16 data_rate;
+
+	if (is_gddr5)
+		factor = 4;
+	else
+		factor = 2;
+
+	data_rate = (u16)(memory_clock * factor / 1000);
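+	/* data_rate ends up in 10 MHz units, assuming memory_clock follows
+	 * the driver's usual 10 kHz convention; the table tops out at 3.2 GT/s
+	 */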
+
+	if (data_rate < dll_speed_table[0].max) {
+		for (i = 0; i < 16; i++) {
+			if (data_rate > dll_speed_table[i].min &&
+			    data_rate <= dll_speed_table[i].max)
+				return dll_speed_table[i].dll_speed;
+		}
+	}
+
+	DRM_DEBUG_KMS("Target MCLK greater than largest MCLK in DLL speed table\n");
+
+	return 0x0f;
+}
+
+int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
+			      RV770_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum = pi->clk_regs.rv770.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
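+	/* feedback divider in 2^-14 fixed point:
+	 * engine_clock = ref_clock * (fbdiv / 16384) / (reference_divider * post_div);
+	 * SPLL_DITHEN below enables the fractional (dithered) portion
+	 */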
+	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = cpu_to_be32(engine_clock);
+	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
+
+	return 0;
+}
+
+int rv740_populate_mclk_value(struct radeon_device *rdev,
+			      u32 engine_clock, u32 memory_clock,
+			      RV7XX_SMC_MCLK_VALUE *mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
+	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
+	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
+	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
+	struct atom_clock_dividers dividers;
+	u32 ibias;
+	u32 dll_speed;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     memory_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	ibias = rv770_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
+
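+	/* the memory PLL has separate AD (address) and DQ (data) domains;
+	 * both use the same dividers, but DQ only needs reprogramming for
+	 * GDDR5
+	 */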
+	mpll_ad_func_cntl &= ~(CLKR_MASK |
+			       YCLK_POST_DIV_MASK |
+			       CLKF_MASK |
+			       CLKFRAC_MASK |
+			       IBIAS_MASK);
+	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
+	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
+	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+	mpll_ad_func_cntl |= IBIAS(ibias);
+
+	if (dividers.vco_mode)
+		mpll_ad_func_cntl_2 |= VCO_MODE;
+	else
+		mpll_ad_func_cntl_2 &= ~VCO_MODE;
+
+	if (pi->mem_gddr5) {
+		mpll_dq_func_cntl &= ~(CLKR_MASK |
+				       YCLK_POST_DIV_MASK |
+				       CLKF_MASK |
+				       CLKFRAC_MASK |
+				       IBIAS_MASK);
+		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
+		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
+		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
+		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
+		mpll_dq_func_cntl |= IBIAS(ibias);
+
+		if (dividers.vco_mode)
+			mpll_dq_func_cntl_2 |= VCO_MODE;
+		else
+			mpll_dq_func_cntl_2 &= ~VCO_MODE;
+	}
+
+	if (pi->mclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = memory_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+			u32 reference_clock = rdev->clock.mpll.reference_freq;
+			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+			u32 clk_v = 0x40000 * ss.percentage *
+				(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clk_v);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clk_s);
+		}
+	}
+
+	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
+					memory_clock);
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
+
+	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+void rv740_read_clock_registers(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	pi->clk_regs.rv770.cg_spll_func_cntl =
+		RREG32(CG_SPLL_FUNC_CNTL);
+	pi->clk_regs.rv770.cg_spll_func_cntl_2 =
+		RREG32(CG_SPLL_FUNC_CNTL_2);
+	pi->clk_regs.rv770.cg_spll_func_cntl_3 =
+		RREG32(CG_SPLL_FUNC_CNTL_3);
+	pi->clk_regs.rv770.cg_spll_spread_spectrum =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+
+	pi->clk_regs.rv770.mpll_ad_func_cntl =
+		RREG32(MPLL_AD_FUNC_CNTL);
+	pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
+		RREG32(MPLL_AD_FUNC_CNTL_2);
+	pi->clk_regs.rv770.mpll_dq_func_cntl =
+		RREG32(MPLL_DQ_FUNC_CNTL);
+	pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
+		RREG32(MPLL_DQ_FUNC_CNTL_2);
+	pi->clk_regs.rv770.mclk_pwrmgt_cntl =
+		RREG32(MCLK_PWRMGT_CNTL);
+	pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
+	pi->clk_regs.rv770.mpll_ss1 = RREG32(MPLL_SS1);
+	pi->clk_regs.rv770.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
+				  RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
+	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
+	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
+					  &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE =
+			pi->pcie_gen2 ?
+			pi->acpi_pcie_gen2 : 0;
+		table->ACPIState.levels[0].gen2XSP =
+			pi->acpi_pcie_gen2;
+	} else {
+		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
+					  &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE = 0;
+	}
+
+	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
+
+	mpll_dq_func_cntl_2 |= BYPASS | BIAS_GEN_PDNB | RESET_EN;
+
+	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
+			     MRDCKA1_RESET |
+			     MRDCKB0_RESET |
+			     MRDCKB1_RESET |
+			     MRDCKC0_RESET |
+			     MRDCKC1_RESET |
+			     MRDCKD0_RESET |
+			     MRDCKD1_RESET);
+
+	dll_cntl |= (MRDCKA0_BYPASS |
+		     MRDCKA1_BYPASS |
+		     MRDCKB0_BYPASS |
+		     MRDCKB1_BYPASS |
+		     MRDCKC0_BYPASS |
+		     MRDCKC1_BYPASS |
+		     MRDCKD0_BYPASS |
+		     MRDCKD1_BYPASS);
+
+	spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	table->ACPIState.levels[1] = table->ACPIState.levels[0];
+	table->ACPIState.levels[2] = table->ACPIState.levels[0];
+
+	return 0;
+}
+
+void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
+				       bool enable)
+{
+	if (enable)
+		WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
+	else
+		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
+}
+
+u8 rv740_get_mclk_frequency_ratio(u32 memory_clock)
+{
+	u8 mc_para_index;
+
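+	/* map 100-475 MHz (in 10 kHz units) onto indices 0-15 in 25 MHz
+	 * steps; out-of-range clocks fall back to entry 0
+	 */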
+	if ((memory_clock < 10000) || (memory_clock > 47500))
+		mc_para_index = 0x00;
+	else
+		mc_para_index = (u8)((memory_clock - 10000) / 2500);
+
+	return mc_para_index;
+}
diff --git a/drivers/gpu/drm/radeon/rv740d.h b/drivers/gpu/drm/radeon/rv740d.h
new file mode 100644
index 0000000..fe5ab07
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740d.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef RV740_H
+#define RV740_H
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_DITHEN				(1 << 28)
+
+#define	MPLL_CNTL_MODE					0x61c
+#define		SS_SSEN					(1 << 24)
+
+#define	MPLL_AD_FUNC_CNTL				0x624
+#define		CLKF(x)					((x) << 0)
+#define		CLKF_MASK				(0x7f << 0)
+#define		CLKR(x)					((x) << 7)
+#define		CLKR_MASK				(0x1f << 7)
+#define		CLKFRAC(x)				((x) << 12)
+#define		CLKFRAC_MASK				(0x1f << 12)
+#define		YCLK_POST_DIV(x)			((x) << 17)
+#define		YCLK_POST_DIV_MASK			(3 << 17)
+#define		IBIAS(x)				((x) << 20)
+#define		IBIAS_MASK				(0x3ff << 20)
+#define		RESET					(1 << 30)
+#define		PDNB					(1 << 31)
+#define	MPLL_AD_FUNC_CNTL_2				0x628
+#define		BYPASS					(1 << 19)
+#define		BIAS_GEN_PDNB				(1 << 24)
+#define		RESET_EN				(1 << 25)
+#define		VCO_MODE				(1 << 29)
+#define	MPLL_DQ_FUNC_CNTL				0x62c
+#define	MPLL_DQ_FUNC_CNTL_2				0x630
+
+#define	MCLK_PWRMGT_CNTL				0x648
+#define		DLL_SPEED(x)				((x) << 0)
+#define		DLL_SPEED_MASK				(0x1f << 0)
+#       define MPLL_PWRMGT_OFF                          (1 << 5)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCKA0_SLEEP                            (1 << 8)
+#       define MRDCKA1_SLEEP                            (1 << 9)
+#       define MRDCKB0_SLEEP                            (1 << 10)
+#       define MRDCKB1_SLEEP                            (1 << 11)
+#       define MRDCKC0_SLEEP                            (1 << 12)
+#       define MRDCKC1_SLEEP                            (1 << 13)
+#       define MRDCKD0_SLEEP                            (1 << 14)
+#       define MRDCKD1_SLEEP                            (1 << 15)
+#       define MRDCKA0_RESET                            (1 << 16)
+#       define MRDCKA1_RESET                            (1 << 17)
+#       define MRDCKB0_RESET                            (1 << 18)
+#       define MRDCKB1_RESET                            (1 << 19)
+#       define MRDCKC0_RESET                            (1 << 20)
+#       define MRDCKC1_RESET                            (1 << 21)
+#       define MRDCKD0_RESET                            (1 << 22)
+#       define MRDCKD1_RESET                            (1 << 23)
+#       define DLL_READY_READ                           (1 << 24)
+#       define USE_DISPLAY_GAP                          (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                (1 << 26)
+#       define MPLL_TURNOFF_D2                          (1 << 28)
+#define	DLL_CNTL					0x64c
+#       define MRDCKA0_BYPASS                           (1 << 24)
+#       define MRDCKA1_BYPASS                           (1 << 25)
+#       define MRDCKB0_BYPASS                           (1 << 26)
+#       define MRDCKB1_BYPASS                           (1 << 27)
+#       define MRDCKC0_BYPASS                           (1 << 28)
+#       define MRDCKC1_BYPASS                           (1 << 29)
+#       define MRDCKD0_BYPASS                           (1 << 30)
+#       define MRDCKD1_BYPASS                           (1 << 31)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x790
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+
+#define	MPLL_SS1					0x85c
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0x860
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 0000000..729ae58
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,2100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include <drm/radeon_drm.h>
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+static void rv770_gpu_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* RV740 uses evergreen uvd clk programming */
+	if (rdev->family == CHIP_RV740)
+		return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
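+	/* choose dividers for a 500-1600 MHz VCO (parameters in 10 kHz units) */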
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+					  43663, 0x03FFFFFE, 1, 30, ~0,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	fb_div |= 1;
+	vclk_div -= 1;
+	dclk_div -= 1;
+
+	/* set UPLL_FB_DIV to 0x50000 */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+	/* deassert UPLL_RESET and UPLL_SLEEP */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+	/* assert BYPASS_EN and FB_DIV[0] (required by the programming sequence; reason undocumented) */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* set the required FB_DIV, REF_DIV and post divider values */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_SW_HILEN(vclk_div >> 1) |
+		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+		 UPLL_SW_HILEN2(dclk_div >> 1) |
+		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+		 ~UPLL_SW_MASK);
+
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+		 ~UPLL_FB_DIV_MASK);
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* deassert BYPASS_EN and FB_DIV[0] again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
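+/* "golden" register tables: (offset, mask, value) triples applied by
+ * radeon_program_register_sequence(); the bits selected by mask are
+ * replaced with value
+ */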
+static const u32 r7xx_golden_registers[] =
+{
+	0x8d00, 0xffffffff, 0x0e0e0074,
+	0x8d04, 0xffffffff, 0x013a2b34,
+	0x9508, 0xffffffff, 0x00000002,
+	0x8b20, 0xffffffff, 0,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x28350, 0xffffffff, 0,
+	0x9058, 0xffffffff, 0x0fffc40f,
+	0x240c, 0xffffffff, 0x00000380,
+	0x733c, 0xffffffff, 0x00000002,
+	0x2650, 0x00040000, 0,
+	0x20bc, 0x00040000, 0,
+	0x7300, 0xffffffff, 0x001000f0
+};
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+	0x8db0, 0xffffffff, 0x98989898,
+	0x8db4, 0xffffffff, 0x98989898,
+	0x8db8, 0xffffffff, 0x98989898,
+	0x8dbc, 0xffffffff, 0x98989898,
+	0x8dc0, 0xffffffff, 0x98989898,
+	0x8dc4, 0xffffffff, 0x98989898,
+	0x8dc8, 0xffffffff, 0x98989898,
+	0x8dcc, 0xffffffff, 0x98989898,
+	0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+	0x562c, 0xffffffff, 0,
+	0x3f90, 0xffffffff, 0,
+	0x9148, 0xffffffff, 0,
+	0x3f94, 0xffffffff, 0,
+	0x914c, 0xffffffff, 0,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+	0x562c, 0xffffffff, 0,
+	0x3f90, 0xffffffff, 0x00cc0000,
+	0x9148, 0xffffffff, 0x00cc0000,
+	0x3f94, 0xffffffff, 0x00cc0000,
+	0x914c, 0xffffffff, 0x00cc0000,
+	0x9b7c, 0xffffffff, 0x00fa0000,
+	0x3f8c, 0xffffffff, 0x00fa0000,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x130300f9,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10002,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10003,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x9,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x9,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x8,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x9,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9160, 0xffffffff, 0x00040003,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00080007,
+	0x9174, 0xffffffff, 0x000a0009,
+	0x9178, 0xffffffff, 0x000c000b,
+	0x917c, 0xffffffff, 0x000e000d,
+	0x9180, 0xffffffff, 0x0010000f,
+	0x918c, 0xffffffff, 0x00120011,
+	0x9190, 0xffffffff, 0x00140013,
+	0x9194, 0xffffffff, 0x00020001,
+	0x9198, 0xffffffff, 0x00040003,
+	0x919c, 0xffffffff, 0x00060005,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000a0009,
+	0x91b0, 0xffffffff, 0x000c000b,
+	0x91b4, 0xffffffff, 0x000e000d,
+	0x91b8, 0xffffffff, 0x0010000f,
+	0x91c4, 0xffffffff, 0x00120011,
+	0x91c8, 0xffffffff, 0x00140013,
+	0x91cc, 0xffffffff, 0x00020001,
+	0x91d0, 0xffffffff, 0x00040003,
+	0x91d4, 0xffffffff, 0x00060005,
+	0x91e0, 0xffffffff, 0x00080007,
+	0x91e4, 0xffffffff, 0x000a0009,
+	0x91e8, 0xffffffff, 0x000c000b,
+	0x91ec, 0xffffffff, 0x00020001,
+	0x91f0, 0xffffffff, 0x00040003,
+	0x91f4, 0xffffffff, 0x00060005,
+	0x9200, 0xffffffff, 0x00080007,
+	0x9204, 0xffffffff, 0x000a0009,
+	0x9208, 0xffffffff, 0x000c000b,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x0010000f,
+	0x921c, 0xffffffff, 0x00120011,
+	0x9220, 0xffffffff, 0x00140013,
+	0x9224, 0xffffffff, 0x00020001,
+	0x9228, 0xffffffff, 0x00040003,
+	0x922c, 0xffffffff, 0x00060005,
+	0x9238, 0xffffffff, 0x00080007,
+	0x923c, 0xffffffff, 0x000a0009,
+	0x9240, 0xffffffff, 0x000c000b,
+	0x9244, 0xffffffff, 0x000e000d,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x9254, 0xffffffff, 0x00120011,
+	0x9258, 0xffffffff, 0x00140013,
+	0x925c, 0xffffffff, 0x00020001,
+	0x9260, 0xffffffff, 0x00040003,
+	0x9264, 0xffffffff, 0x00060005,
+	0x9270, 0xffffffff, 0x00080007,
+	0x9274, 0xffffffff, 0x000a0009,
+	0x9278, 0xffffffff, 0x000c000b,
+	0x927c, 0xffffffff, 0x000e000d,
+	0x9280, 0xffffffff, 0x0010000f,
+	0x928c, 0xffffffff, 0x00120011,
+	0x9290, 0xffffffff, 0x00140013,
+	0x9294, 0xffffffff, 0x00020001,
+	0x929c, 0xffffffff, 0x00040003,
+	0x92a0, 0xffffffff, 0x00060005,
+	0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+	0x3f90, 0x00ff0000, 0x00fc0000,
+	0x9148, 0x00ff0000, 0x00fc0000,
+	0x3f94, 0x00ff0000, 0x00fc0000,
+	0x914c, 0x00ff0000, 0x00fc0000,
+	0xb4c, 0x00000020, 0x00000020,
+	0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x13030040,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9174, 0xffffffff, 0x00000003,
+	0x9178, 0xffffffff, 0x00050001,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00000004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050001,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00000004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000001,
+	0x9294, 0xffffffff, 0x00000001,
+	0x929c, 0xffffffff, 0x00000002,
+	0x92a0, 0xffffffff, 0x00040003,
+	0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+	0x3f90, 0x00ff0000, 0x00f00000,
+	0x9148, 0x00ff0000, 0x00f00000,
+	0x3f94, 0x00ff0000, 0x00f00000,
+	0x914c, 0x00ff0000, 0x00f00000,
+	0x900c, 0xffffffff, 0x003b033f,
+	0xb4c, 0x00000020, 0x00000020,
+	0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x130300f9,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x916c, 0xffffffff, 0x00040003,
+	0x9170, 0xffffffff, 0x00000005,
+	0x9178, 0xffffffff, 0x00050001,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00000004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050001,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00000004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91b0, 0xffffffff, 0x00050001,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91c4, 0xffffffff, 0x00000004,
+	0x91c8, 0xffffffff, 0x00070006,
+	0x91cc, 0xffffffff, 0x00050001,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91e0, 0xffffffff, 0x00000004,
+	0x91e4, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000001,
+	0x91ec, 0xffffffff, 0x00050001,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x9200, 0xffffffff, 0x00000004,
+	0x9204, 0xffffffff, 0x00070006,
+	0x9208, 0xffffffff, 0x00050001,
+	0x920c, 0xffffffff, 0x00030002,
+	0x921c, 0xffffffff, 0x00000004,
+	0x9220, 0xffffffff, 0x00070006,
+	0x9224, 0xffffffff, 0x00050001,
+	0x9228, 0xffffffff, 0x00030002,
+	0x9238, 0xffffffff, 0x00000004,
+	0x923c, 0xffffffff, 0x00070006,
+	0x9240, 0xffffffff, 0x00050001,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9254, 0xffffffff, 0x00000004,
+	0x9258, 0xffffffff, 0x00070006,
+	0x9294, 0xffffffff, 0x00000001,
+	0x929c, 0xffffffff, 0x00000002,
+	0x92a0, 0xffffffff, 0x00040003,
+	0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+	0x88c4, 0xffffffff, 0x00000082,
+	0x28a50, 0xfffffffc, 0x00000004,
+	0x2650, 0x00040000, 0,
+	0x20bc, 0x00040000, 0,
+	0x733c, 0xffffffff, 0x00000002,
+	0x7300, 0xffffffff, 0x001000f0,
+	0x3f90, 0x00ff0000, 0,
+	0x9148, 0x00ff0000, 0,
+	0x3f94, 0x00ff0000, 0,
+	0x914c, 0x00ff0000, 0,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8a14, 0x00000007, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0xffffffff, 0x00004000,
+	0xa180, 0xffffffff, 0x00003f3f,
+	0x8d00, 0xffffffff, 0x0e0e003a,
+	0x8d04, 0xffffffff, 0x013a0e2a,
+	0x8c00, 0xffffffff, 0xe400000f,
+	0x8db0, 0xffffffff, 0x98989898,
+	0x8db4, 0xffffffff, 0x98989898,
+	0x8db8, 0xffffffff, 0x98989898,
+	0x8dbc, 0xffffffff, 0x98989898,
+	0x8dc0, 0xffffffff, 0x98989898,
+	0x8dc4, 0xffffffff, 0x98989898,
+	0x8dc8, 0xffffffff, 0x98989898,
+	0x8dcc, 0xffffffff, 0x98989898,
+	0x9058, 0xffffffff, 0x0fffc40f,
+	0x900c, 0xffffffff, 0x003b033f,
+	0x28350, 0xffffffff, 0,
+	0x8cf0, 0x1fffffff, 0x08e00420,
+	0x9508, 0xffffffff, 0x00000002,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x13030100,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x100,
+	0x8b28, 0xffffffff, 0x100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10002,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10003,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9160, 0xffffffff, 0x00040003,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00080007,
+	0x9174, 0xffffffff, 0x000a0009,
+	0x9178, 0xffffffff, 0x000c000b,
+	0x917c, 0xffffffff, 0x000e000d,
+	0x9180, 0xffffffff, 0x0010000f,
+	0x918c, 0xffffffff, 0x00120011,
+	0x9190, 0xffffffff, 0x00140013,
+	0x9194, 0xffffffff, 0x00020001,
+	0x9198, 0xffffffff, 0x00040003,
+	0x919c, 0xffffffff, 0x00060005,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000a0009,
+	0x91b0, 0xffffffff, 0x000c000b,
+	0x91b4, 0xffffffff, 0x000e000d,
+	0x91b8, 0xffffffff, 0x0010000f,
+	0x91c4, 0xffffffff, 0x00120011,
+	0x91c8, 0xffffffff, 0x00140013,
+	0x91cc, 0xffffffff, 0x00020001,
+	0x91d0, 0xffffffff, 0x00040003,
+	0x91d4, 0xffffffff, 0x00060005,
+	0x91e0, 0xffffffff, 0x00080007,
+	0x91e4, 0xffffffff, 0x000a0009,
+	0x91e8, 0xffffffff, 0x000c000b,
+	0x91ec, 0xffffffff, 0x00020001,
+	0x91f0, 0xffffffff, 0x00040003,
+	0x91f4, 0xffffffff, 0x00060005,
+	0x9200, 0xffffffff, 0x00080007,
+	0x9204, 0xffffffff, 0x000a0009,
+	0x9208, 0xffffffff, 0x000c000b,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x0010000f,
+	0x921c, 0xffffffff, 0x00120011,
+	0x9220, 0xffffffff, 0x00140013,
+	0x9224, 0xffffffff, 0x00020001,
+	0x9228, 0xffffffff, 0x00040003,
+	0x922c, 0xffffffff, 0x00060005,
+	0x9238, 0xffffffff, 0x00080007,
+	0x923c, 0xffffffff, 0x000a0009,
+	0x9240, 0xffffffff, 0x000c000b,
+	0x9244, 0xffffffff, 0x000e000d,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x9254, 0xffffffff, 0x00120011,
+	0x9258, 0xffffffff, 0x00140013,
+	0x9294, 0xffffffff, 0x00020001,
+	0x929c, 0xffffffff, 0x00040003,
+	0x92a0, 0xffffffff, 0x00060005,
+	0x92a4, 0xffffffff, 0x00080007
+};
+
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_RV770:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		if (rdev->pdev->device == 0x994e)
+			radeon_program_register_sequence(rdev,
+							 rv770ce_golden_registers,
+							 (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+		else
+			radeon_program_register_sequence(rdev,
+							 rv770_golden_registers,
+							 (const u32)ARRAY_SIZE(rv770_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv770_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+		break;
+	case CHIP_RV730:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		radeon_program_register_sequence(rdev,
+						 rv730_golden_registers,
+						 (const u32)ARRAY_SIZE(rv730_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv730_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
+		break;
+	case CHIP_RV710:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		radeon_program_register_sequence(rdev,
+						 rv710_golden_registers,
+						 (const u32)ARRAY_SIZE(rv710_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv710_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
+		break;
+	case CHIP_RV740:
+		radeon_program_register_sequence(rdev,
+						 rv740_golden_registers,
+						 (const u32)ARRAY_SIZE(rv740_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv740_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
+		break;
+	default:
+		break;
+	}
+}
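+
+/* The golden register tables above are flat triples of
+ * { offset, and-mask, value }.  A minimal sketch of how such a table is
+ * walked (the authoritative logic lives in
+ * radeon_program_register_sequence() in the core driver):
+ */
+#if 0
+static void program_sequence_sketch(struct radeon_device *rdev,
+				    const u32 *regs, const u32 array_size)
+{
+	u32 tmp, reg, and_mask, or_mask;
+	u32 i;
+
+	for (i = 0; i < array_size; i += 3) {
+		reg = regs[i + 0];
+		and_mask = regs[i + 1];
+		or_mask = regs[i + 2];
+		if (and_mask == 0xffffffff) {
+			/* full-width mask: plain write */
+			tmp = or_mask;
+		} else {
+			/* partial mask: read-modify-write */
+			tmp = RREG32(reg);
+			tmp &= ~and_mask;
+			tmp |= or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+}
+#endif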
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
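+/* A worked note, assuming the 10 kHz clock units used elsewhere in the
+ * driver: PCIE_BUS_CLK of 10000 is the 100 MHz PCIe reference, so TCLK
+ * works out to 1000, i.e. 10 MHz.
+ */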
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* Return current update_pending status: */
+	return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+		AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
+}
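+
+/* A hypothetical caller-side sketch (not the driver's actual flip path)
+ * showing how the helper above is meant to be polled:
+ */
+#if 0
+static void wait_flip_sketch(struct radeon_device *rdev, int crtc_id)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (!rv770_page_flip_pending(rdev, crtc_id))
+			break;	/* the new surface address has latched */
+		udelay(1);
+	}
+}
+#endif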
+
+/* get temperature in millidegrees */
+int rv770_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp;
+
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
+
+	return (actual_temp * 1000) / 2;
+}
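+
+/* Two worked examples of the decode above: the raw field is a signed
+ * value in half-degree units, so raw 0x50 (80) yields 80 * 1000 / 2 =
+ * 40000 millidegrees (40 C), while raw 0x1f0 sign-extends via the 0x100
+ * branch to 496 - 512 = -16, i.e. -8000 millidegrees.
+ */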
+
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+/*
+ * GART
+ */
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	if (rdev->family == CHIP_RV740)
+		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv770_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+static void rv770_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
+	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+	 */
+	tmp = RREG32(HDP_DEBUG1);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lockout access through VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
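+	/* MC_VM_FB_LOCATION packs the FB range as 16 MiB aligned bounds:
+	 * vram_end >> 24 in the top half, vram_start >> 24 in the bottom.
+	 * E.g. 512 MiB of VRAM at offset 0 gives vram_end 0x1fffffff and
+	 * a register value of 0x001f0000.
+	 */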
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void r700_cp_stop(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+static int rv770_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+void r700_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r700_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+	u32 tmp, i;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= SCLK_MUX_SEL_MASK;
+	tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp &= ~SCLK_MUX_UPDATE;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+		tmp &= ~RV730_MPLL_MCLK_SEL;
+	else
+		tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+/*
+ * Core functions
+ */
+static void rv770_gpu_init(struct radeon_device *rdev)
+{
+	int i, j, num_qd_pipes;
+	u32 ta_aux_cntl;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 db_debug3;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_thread_resource_mgmt;
+	u32 hdp_host_path_cntl;
+	u32 sq_dyn_gpr_size_simd_ab_0;
+	u32 gb_tiling_config = 0;
+	u32 cc_gc_shader_pipe_config = 0;
+	u32 mc_arb_ramcfg;
+	u32 db_debug4, tmp;
+	u32 inactive_pipes, shader_pipe_config;
+	u32 disabled_rb_mask;
+	unsigned active_number;
+
+	/* setup chip specs */
+	rdev->config.rv770.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_RV770:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 8;
+		rdev->config.rv770.max_simds = 10;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV730:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 2;
+		rdev->config.rv770.max_gprs = 128;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	case CHIP_RV710:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 2;
+		rdev->config.rv770.max_simds = 2;
+		rdev->config.rv770.max_backends = 1;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 192;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 4;
+		rdev->config.rv770.max_gs_threads = 8 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 1;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x40;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV740:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x100;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+		if (!(inactive_pipes & tmp)) {
+			active_number++;
+		}
+		tmp <<= 1;
+	}
+	if (active_number == 1) {
+		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+	} else {
+		WREG32(SPI_CONFIG_CNTL, 0);
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	tmp = rdev->config.rv770.max_simds -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+	rdev->config.rv770.active_simds = tmp;
+
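+	/* PIPE_TILING() takes log2 of the pipe count: 1->0, 2->1, 4->2, 8->3 */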
+	switch (rdev->config.rv770.max_tile_pipes) {
+	case 1:
+	default:
+		gb_tiling_config = PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config = PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config = PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config = PIPE_TILING(3);
+		break;
+	}
+	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.rv770.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.rv770.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
+	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+					R7XX_MAX_BACKENDS, disabled_rb_mask);
+	gb_tiling_config |= tmp << 16;
+	rdev->config.rv770.backend_map = tmp;
+
+	if (rdev->family == CHIP_RV770)
+		gb_tiling_config |= BANK_TILING(1);
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			gb_tiling_config |= BANK_TILING(1);
+		else
+			gb_tiling_config |= BANK_TILING(0);
+	}
+	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
+	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
+		gb_tiling_config |= ROW_TILING(3);
+		gb_tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+		gb_tiling_config |=
+			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+	}
+
+	gb_tiling_config |= BANK_SWAPS(1);
+	rdev->config.rv770.tile_config = gb_tiling_config;
+
+	WREG32(GB_TILING_CONFIG, gb_tiling_config);
+	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+	if (rdev->family == CHIP_RV730) {
+		WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+		WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+		WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	}
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	ta_aux_cntl = RREG32(TA_CNTL_AUX);
+	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
+	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family != CHIP_RV740)
+		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+				       GS_FLUSH_CTL(4) |
+				       ACK_FLUSH_CTL(3) |
+				       SYNC_FLUSH_CTL));
+
+	if (rdev->family != CHIP_RV770)
+		WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
+	db_debug3 = RREG32(DB_DEBUG3);
+	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV740:
+		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+		break;
+	case CHIP_RV710:
+	case CHIP_RV730:
+	default:
+		db_debug3 |= DB_CLK_OFF_DELAY(2);
+		break;
+	}
+	WREG32(DB_DEBUG3, db_debug3);
+
+	if (rdev->family != CHIP_RV770) {
+		db_debug4 = RREG32(DB_DEBUG4);
+		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
+		WREG32(DB_DEBUG4, db_debug4);
+	}
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
+			    DONE_FIFO_HIWATER(0xe0) |
+			    ALU_UPDATE_FIFO_HIWATER(0x8));
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+		break;
+	case CHIP_RV740:
+	default:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
+		break;
+	}
+	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+	if (rdev->family == CHIP_RV710)
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+	WREG32(SQ_CONFIG, sq_config);
+
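+	/* the GPR pool is split in 64ths: 24 each for PS and VS, 7 each for
+	 * GS and ES, with half of one 24/64 share set aside as clause temps
+	 */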
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
+					 NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
+
+	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
+				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
+				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
+	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
+		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
+	else
+		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
+
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if (rdev->family == CHIP_RV710)
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+	else
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		gs_prim_buffer_depth = 384;
+		break;
+	case CHIP_RV710:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
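+	/* e.g. RV770: 384 + 4 * 16 = 448, which the clamp above cuts to 256 */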
+
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
+	WREG32(VGT_GS_PER_VS, 2);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+
+	WREG32(TCP_CNTL, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+					  NUM_CLIP_SEQ(3)));
+	WREG32(VC_ENHANCE, 0);
+}
+
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
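+	/* On AGP the GTT aperture is fixed, so VRAM goes into whichever gap
+	 * around it is larger.  E.g. with the GTT starting at 0x80000000 and
+	 * 512 MiB of VRAM, the space below the GTT wins and vram_start
+	 * becomes 0x80000000 - 0x20000000 = 0x60000000.
+	 */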
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = mc->mc_mask - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
+static int rv770_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
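+	/* total bus width is channels * channel size, e.g. 4 * 64 = 256-bit */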
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+static void rv770_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early, so nothing happens there.
+		 * It is therefore pointless to go through that code, which
+		 * is why we disable uvd here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void rv770_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void rv770_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static int rv770_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	rv770_mc_program(rdev);
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		rv770_agp_enable(rdev);
+	} else {
+		r = rv770_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	rv770_gpu_init(rdev);
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	rv770_uvd_start(rdev);
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = rv770_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	rv770_uvd_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rv770_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
+	 * posting performs the tasks necessary to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	rv770_init_golden_registers(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		DRM_ERROR("rv770 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int rv770_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	rv770_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and to use
+ * helper functions so that radeon_device_init does little more than
+ * call asic specific functions. This should also allow us to remove
+ * a bunch of callbacks like vram_info.
+ */
+int rv770_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. Posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	rv770_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = rv770_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	rv770_uvd_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv770_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	return 0;
+}
+
+void rv770_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	uvd_v1_0_fini(rdev);
+	radeon_uvd_fini(rdev);
+	rv770_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 0000000..acff6e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
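+	/* e.g. 1024 pages of 4 KiB are 1048576 dwords, so num_loops is
+	 * DIV_ROUND_UP(1048576, 0xFFFF) = 17 and 17 * 5 + 8 = 93 ring
+	 * entries are reserved below
+	 */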
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
new file mode 100644
index 0000000..c765ae7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -0,0 +1,2587 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+#include "r600_dpm.h"
+#include "rv770_dpm.h"
+#include "cypress_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
+
+#define SMC_RAM_END 0xC000
+
+struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps)
+{
+	struct rv7xx_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
+					       bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (enable) {
+		tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+		tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
+		tmp |= LC_GEN2_EN_STRAP;
+	} else {
+		if (!pi->boot_in_gen2) {
+			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
+			tmp &= ~LC_GEN2_EN_STRAP;
+		}
+	}
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
+}
+
+static void rv770_enable_l0s(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
+	tmp |= LC_L0S_INACTIVITY(3);
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+}
+
+static void rv770_enable_l1(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	tmp &= ~LC_L1_INACTIVITY_MASK;
+	tmp |= LC_L1_INACTIVITY(4);
+	tmp &= ~LC_PMI_TO_L1_DIS;
+	tmp &= ~LC_ASPM_TO_L1_DIS;
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+}
+
+static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
+	tmp |= LC_L1_INACTIVITY(8);
+	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
+
+	/* NOTE, this is a PCIE indirect reg, not PCIE PORT */
+	tmp = RREG32_PCIE(PCIE_P_CNTL);
+	tmp |= P_PLL_PWRDN_IN_L1L23;
+	tmp &= ~P_PLL_BUF_PDNB;
+	tmp &= ~P_PLL_PDNB;
+	tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
+	WREG32_PCIE(PCIE_P_CNTL, tmp);
+}
+
+static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev,
+					  bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+	else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_TILING_CONFIG);
+	}
+}
+
+static void rv770_mg_clock_gating_enable(struct radeon_device *rdev,
+					 bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		u32 mgcg_cgtt_local0;
+
+		if (rdev->family == CHIP_RV770)
+			mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT;
+		else
+			mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT;
+
+		WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0);
+		WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF));
+
+		if (pi->mgcgtssm)
+			WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT);
+	} else {
+		WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
+		WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF);
+	}
+}
+
+void rv770_restore_cgcg(struct radeon_device *rdev)
+{
+	bool dpm_en = false, cg_en = false;
+
+	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
+		dpm_en = true;
+	if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN)
+		cg_en = true;
+
+	if (dpm_en && !cg_en)
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+}
+
+static void rv770_start_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+
+	WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
+
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+void rv770_stop_dpm(struct radeon_device *rdev)
+{
+	PPSMC_Result result;
+
+	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
+
+	if (result != PPSMC_Result_OK)
+		DRM_DEBUG("Could not force DPM to low.\n");
+
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+
+	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+
+	WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
+}
+
+bool rv770_dpm_enabled(struct radeon_device *rdev)
+{
+	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
+		return true;
+	else
+		return false;
+}
+
+void rv770_enable_thermal_protection(struct radeon_device *rdev,
+				     bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	else
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+void rv770_enable_acpi_pm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+u8 rv770_get_seq_value(struct radeon_device *rdev,
+		       struct rv7xx_pl *pl)
+{
+	return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ?
+		MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1;
+}
+
+#if 0
+int rv770_read_smc_soft_register(struct radeon_device *rdev,
+				 u16 reg_offset, u32 *value)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	return rv770_read_smc_sram_dword(rdev,
+					 pi->soft_regs_start + reg_offset,
+					 value, pi->sram_end);
+}
+#endif
+
+int rv770_write_smc_soft_register(struct radeon_device *rdev,
+				  u16 reg_offset, u32 value)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	return rv770_write_smc_sram_dword(rdev,
+					  pi->soft_regs_start + reg_offset,
+					  value, pi->sram_end);
+}
+
+int rv770_populate_smc_t(struct radeon_device *rdev,
+			 struct radeon_ps *radeon_state,
+			 RV770_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+	int a_n;
+	int a_d;
+	u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+	u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+	u32 a_t;
+
+	l[0] = 0;
+	r[2] = 100;
+
+	a_n = (int)state->medium.sclk * pi->lmp +
+		(int)state->low.sclk * (R600_AH_DFLT - pi->rlp);
+	a_d = (int)state->low.sclk * (100 - (int)pi->rlp) +
+		(int)state->medium.sclk * pi->lmp;
+
+	l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d);
+	r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d);
+
+	a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk *
+		(R600_AH_DFLT - pi->rmp);
+	a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) +
+		(int)state->high.sclk * pi->lhp;
+
+	l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d);
+	r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d);
+
+	for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) {
+		a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200);
+		smc_state->levels[i].aT = cpu_to_be32(a_t);
+	}
+
+	a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) |
+		CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200);
+
+	smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT =
+		cpu_to_be32(a_t);
+
+	return 0;
+}
+
+int rv770_populate_smc_sp(struct radeon_device *rdev,
+			  struct radeon_ps *radeon_state,
+			  RV770_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
+	for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++)
+		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+	smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP =
+		cpu_to_be32(pi->psp);
+
+	return 0;
+}
+
+static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock,
+							     u32 reference_clock,
+							     bool gddr5,
+							     struct atom_clock_dividers *dividers,
+							     u32 *clkf,
+							     u32 *clkfrac)
+{
+	u32 post_divider, reference_divider, feedback_divider8;
+	u32 fyclk;
+
+	if (gddr5)
+		fyclk = (memory_clock * 8) / 2;
+	else
+		fyclk = (memory_clock * 4) / 2;
+
+	post_divider = dividers->post_div;
+	reference_divider = dividers->ref_div;
+
+	feedback_divider8 =
+		(8 * fyclk * reference_divider * post_divider) / reference_clock;
+
+	*clkf = feedback_divider8 / 8;
+	*clkfrac = feedback_divider8 % 8;
+}
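+
+/* A worked example, assuming clocks in the driver's 10 kHz units and a
+ * 27 MHz reference (2700): a 1 GHz GDDR5 memory_clock of 100000 gives
+ * fyclk = 400000; with ref_div = 2 and post_div = 1 the intermediate
+ * feedback_divider8 is 8 * 400000 * 2 * 1 / 2700 = 2370, so clkf = 296
+ * and clkfrac = 2.
+ */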
+
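+/* the encoded value is log2(postdiv) for powers of two up to 16 */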
+static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
+{
+	int ret = 0;
+
+	switch (postdiv) {
+	case 1:
+		*encoded_postdiv = 0;
+		break;
+	case 2:
+		*encoded_postdiv = 1;
+		break;
+	case 4:
+		*encoded_postdiv = 2;
+		break;
+	case 8:
+		*encoded_postdiv = 3;
+		break;
+	case 16:
+		*encoded_postdiv = 4;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
+{
+	if (clkf <= 0x10)
+		return 0x4B;
+	if (clkf <= 0x19)
+		return 0x5B;
+	if (clkf <= 0x21)
+		return 0x2B;
+	if (clkf <= 0x27)
+		return 0x6C;
+	if (clkf <= 0x31)
+		return 0x9D;
+	return 0xC6;
+}
+
+static int rv770_populate_mclk_value(struct radeon_device *rdev,
+				     u32 engine_clock, u32 memory_clock,
+				     RV7XX_SMC_MCLK_VALUE *mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 };
+	u32 mpll_ad_func_cntl =
+		pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl =
+		pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl =
+		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
+	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
+	struct atom_clock_dividers dividers;
+	u32 reference_clock = rdev->clock.mpll.reference_freq;
+	u32 clkf, clkfrac;
+	u32 postdiv_yclk;
+	u32 ibias;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
+					     memory_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	if ((dividers.ref_div < 1) || (dividers.ref_div > 5))
+		return -EINVAL;
+
+	rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock,
+							 pi->mem_gddr5,
+							 &dividers, &clkf, &clkfrac);
+
+	ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
+	if (ret)
+		return ret;
+
+	ibias = rv770_map_clkf_to_ibias(rdev, clkf);
+
+	mpll_ad_func_cntl &= ~(CLKR_MASK |
+			       YCLK_POST_DIV_MASK |
+			       CLKF_MASK |
+			       CLKFRAC_MASK |
+			       IBIAS_MASK);
+	mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
+	mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
+	mpll_ad_func_cntl |= CLKF(clkf);
+	mpll_ad_func_cntl |= CLKFRAC(clkfrac);
+	mpll_ad_func_cntl |= IBIAS(ibias);
+
+	if (dividers.vco_mode)
+		mpll_ad_func_cntl_2 |= VCO_MODE;
+	else
+		mpll_ad_func_cntl_2 &= ~VCO_MODE;
+
+	if (pi->mem_gddr5) {
+		rv770_calculate_fractional_mpll_feedback_divider(memory_clock,
+								 reference_clock,
+								 pi->mem_gddr5,
+								 &dividers, &clkf, &clkfrac);
+
+		ibias = rv770_map_clkf_to_ibias(rdev, clkf);
+
+		ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
+		if (ret)
+			return ret;
+
+		mpll_dq_func_cntl &= ~(CLKR_MASK |
+				       YCLK_POST_DIV_MASK |
+				       CLKF_MASK |
+				       CLKFRAC_MASK |
+				       IBIAS_MASK);
+		mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
+		mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
+		mpll_dq_func_cntl |= CLKF(clkf);
+		mpll_dq_func_cntl |= CLKFRAC(clkfrac);
+		mpll_dq_func_cntl |= IBIAS(ibias);
+
+		if (dividers.vco_mode)
+			mpll_dq_func_cntl_2 |= VCO_MODE;
+		else
+			mpll_dq_func_cntl_2 &= ~VCO_MODE;
+	}
+
+	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	return 0;
+}
+
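+/*
+ * The engine PLL feedback divider is kept in 1/16384 steps (hence the
+ * "* 16384" below, with SPLL_DITHEN enabling fractional operation).
+ * Illustrative numbers: engine_clock = 60000 (600 MHz in 10 kHz units),
+ * reference_clock = 2700 (27 MHz), reference_divider = post_divider = 1:
+ *   fbdiv = 60000 * 16384 / 2700 = 364088
+ * i.e. an effective multiplier of 364088 / 16384 ~= 22.22, and
+ * 27 MHz * 22.22 ~= 600 MHz.
+ */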
+static int rv770_populate_sclk_value(struct radeon_device *rdev,
+				     u32 engine_clock,
+				     RV770_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl =
+		pi->clk_regs.rv770.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum =
+		pi->clk_regs.rv770.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 =
+		pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider, post_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
+	if (dividers.enable_post_div)
+		post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2;
+	else
+		post_divider = 1;
+
+	tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	if (dividers.enable_post_div)
+		spll_func_cntl |= SPLL_DIVEN;
+	else
+		spll_func_cntl &= ~SPLL_DIVEN;
+	spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
+	spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * post_divider;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLKS_MASK;
+			cg_spll_spread_spectrum |= CLKS(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLKV_MASK;
+			cg_spll_spread_spectrum_2 |= CLKV(clk_v);
+		}
+	}
+
+	sclk->sclk_value = cpu_to_be32(engine_clock);
+	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
+
+	return 0;
+}
+
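+/*
+ * Pick the lowest VDDC table entry that still covers the requested
+ * voltage; requests above the highest table entry are rejected.
+ */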
+int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
+			      RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
+	if (!pi->voltage_control) {
+		voltage->index = 0;
+		voltage->value = 0;
+		return 0;
+	}
+
+	for (i = 0; i < pi->valid_vddc_entries; i++) {
+		if (vddc <= pi->vddc_table[i].vddc) {
+			voltage->index = pi->vddc_table[i].vddc_index;
+			voltage->value = cpu_to_be16(vddc);
+			break;
+		}
+	}
+
+	if (i == pi->valid_vddc_entries)
+		return -EINVAL;
+
+	return 0;
+}
+
+int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+			      RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (!pi->mvdd_control) {
+		voltage->index = MVDD_HIGH_INDEX;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+		return 0;
+	}
+
+	if (mclk <= pi->mvdd_split_frequency) {
+		voltage->index = MVDD_LOW_INDEX;
+		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
+	} else {
+		voltage->index = MVDD_HIGH_INDEX;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+	}
+
+	return 0;
+}
+
+static int rv770_convert_power_level_to_smc(struct radeon_device *rdev,
+					    struct rv7xx_pl *pl,
+					    RV770_SMC_HW_PERFORMANCE_LEVEL *level,
+					    u8 watermark_level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int ret;
+
+	level->gen2PCIE = pi->pcie_gen2 ?
+		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
+	level->gen2XSP  = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
+	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
+	level->displayWatermark = watermark_level;
+
+	if (rdev->family == CHIP_RV740)
+		ret = rv740_populate_sclk_value(rdev, pl->sclk,
+						&level->sclk);
+	else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		ret = rv730_populate_sclk_value(rdev, pl->sclk,
+						&level->sclk);
+	else
+		ret = rv770_populate_sclk_value(rdev, pl->sclk,
+						&level->sclk);
+	if (ret)
+		return ret;
+
+	if (rdev->family == CHIP_RV740) {
+		if (pi->mem_gddr5) {
+			if (pl->mclk <= pi->mclk_strobe_mode_threshold)
+				level->strobeMode =
+					rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10;
+			else
+				level->strobeMode = 0;
+
+			if (pl->mclk > pi->mclk_edc_enable_threshold)
+				level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
+			else
+				level->mcFlags = 0;
+		}
+		ret = rv740_populate_mclk_value(rdev, pl->sclk,
+						pl->mclk, &level->mclk);
+	} else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		ret = rv730_populate_mclk_value(rdev, pl->sclk,
+						pl->mclk, &level->mclk);
+	else
+		ret = rv770_populate_mclk_value(rdev, pl->sclk,
+						pl->mclk, &level->mclk);
+	if (ret)
+		return ret;
+
+	ret = rv770_populate_vddc_value(rdev, pl->vddc,
+					&level->vddc);
+	if (ret)
+		return ret;
+
+	ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
+static int rv770_convert_power_state_to_smc(struct radeon_device *rdev,
+					    struct radeon_ps *radeon_state,
+					    RV770_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	int ret;
+
+	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	ret = rv770_convert_power_level_to_smc(rdev,
+					       &state->low,
+					       &smc_state->levels[0],
+					       PPSMC_DISPLAY_WATERMARK_LOW);
+	if (ret)
+		return ret;
+
+	ret = rv770_convert_power_level_to_smc(rdev,
+					       &state->medium,
+					       &smc_state->levels[1],
+					       PPSMC_DISPLAY_WATERMARK_LOW);
+	if (ret)
+		return ret;
+
+	ret = rv770_convert_power_level_to_smc(rdev,
+					       &state->high,
+					       &smc_state->levels[2],
+					       PPSMC_DISPLAY_WATERMARK_HIGH);
+	if (ret)
+		return ret;
+
+	smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
+	smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
+	smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
+
+	smc_state->levels[0].seqValue = rv770_get_seq_value(rdev,
+							    &state->low);
+	smc_state->levels[1].seqValue = rv770_get_seq_value(rdev,
+							    &state->medium);
+	smc_state->levels[2].seqValue = rv770_get_seq_value(rdev,
+							    &state->high);
+
+	rv770_populate_smc_sp(rdev, radeon_state, smc_state);
+
+	return rv770_populate_smc_t(rdev, radeon_state, smc_state);
+}
+
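+/*
+ * Example with plausible values: a NOOFROWS field of 4 gives
+ * dram_rows = 1 << 14 = 16384, and MC_SEQ_MISC0 low bits of 3 give
+ * dram_refresh_rate = 1 << 6 = 64.  For engine_clock = 60000 (600 MHz in
+ * 10 kHz units):
+ *   (600000 * 64 / 16384 - 32) / 64 = (2343 - 32) / 64 = 36
+ */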
+u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
+					u32 engine_clock)
+{
+	u32 dram_rows;
+	u32 dram_refresh_rate;
+	u32 mc_arb_rfsh_rate;
+	u32 tmp;
+
+	tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+	dram_rows = 1 << (tmp + 10);
+	tmp = RREG32(MC_SEQ_MISC0) & 3;
+	dram_refresh_rate = 1 << (tmp + 3);
+	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+	return mc_arb_rfsh_rate;
+}
+
+static void rv770_program_memory_timing_parameters(struct radeon_device *rdev,
+						   struct radeon_ps *radeon_state)
+{
+	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 sqm_ratio;
+	u32 arb_refresh_rate;
+	u32 high_clock;
+
+	if (state->high.sclk < (state->low.sclk * 0xFF / 0x40))
+		high_clock = state->high.sclk;
+	else
+		high_clock = (state->low.sclk * 0xFF / 0x40);
+
+	radeon_atom_set_engine_dram_timings(rdev, high_clock,
+					    state->high.mclk);
+
+	sqm_ratio =
+		STATE0(64 * high_clock / pi->boot_sclk) |
+		STATE1(64 * high_clock / state->low.sclk) |
+		STATE2(64 * high_clock / state->medium.sclk) |
+		STATE3(64 * high_clock / state->high.sclk);
+	WREG32(MC_ARB_SQM_RATIO, sqm_ratio);
+
+	arb_refresh_rate =
+		POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) |
+		POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
+		POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
+		POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk));
+	WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
+}
+
+void rv770_enable_backbias(struct radeon_device *rdev,
+			   bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN));
+}
+
+static void rv770_enable_spread_spectrum(struct radeon_device *rdev,
+					 bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		if (pi->sclk_ss)
+			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+
+		if (pi->mclk_ss) {
+			if (rdev->family == CHIP_RV740)
+				rv740_enable_mclk_spread_spectrum(rdev, true);
+		}
+	} else {
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+
+		WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+
+		if (rdev->family == CHIP_RV740)
+			rv740_enable_mclk_spread_spectrum(rdev, false);
+	}
+}
+
+static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) {
+		WREG32(MPLL_TIME,
+		       (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) |
+			MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT)));
+	}
+}
+
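+/*
+ * r600_calculate_u_and_p() packs each sampling interval into a
+ * mantissa/exponent pair that fits the narrow BSP/BSU register fields:
+ * pi->asi -> (bsp, bsu) for the normal path and pi->pasi -> (pbsp, pbsu)
+ * for the performance path.
+ */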
+void rv770_setup_bsp(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(pi->asi,
+			       xclk,
+			       16,
+			       &pi->bsp,
+			       &pi->bsu);
+
+	r600_calculate_u_and_p(pi->pasi,
+			       xclk,
+			       16,
+			       &pi->pbsp,
+			       &pi->pbsu);
+
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+	WREG32(CG_BSP, pi->dsp);
+}
+
+void rv770_program_git(struct radeon_device *rdev)
+{
+	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+void rv770_program_tp(struct radeon_device *rdev)
+{
+	int i;
+	enum r600_td td = R600_TD_DFLT;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+void rv770_program_tpp(struct radeon_device *rdev)
+{
+	WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+void rv770_program_sstp(struct radeon_device *rdev)
+{
+	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+void rv770_program_engine_speed_parameters(struct radeon_device *rdev)
+{
+	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
+}
+
+static void rv770_enable_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+void rv770_program_vc(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	WREG32(CG_FTV, pi->vrc);
+}
+
+void rv770_clear_vc(struct radeon_device *rdev)
+{
+	WREG32(CG_FTV, 0);
+}
+
+int rv770_upload_firmware(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int ret;
+
+	rv770_reset_smc(rdev);
+	rv770_stop_smc_clock(rdev);
+
+	ret = rv770_load_smc_ucode(rdev, pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int rv770_populate_smc_acpi_state(struct radeon_device *rdev,
+					 RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	u32 mpll_ad_func_cntl =
+		pi->clk_regs.rv770.mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl =
+		pi->clk_regs.rv770.mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2 =
+		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
+	u32 spll_func_cntl =
+		pi->clk_regs.rv770.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 =
+		pi->clk_regs.rv770.cg_spll_func_cntl_3;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
+					  &table->ACPIState.levels[0].vddc);
+		if (pi->pcie_gen2) {
+			if (pi->acpi_pcie_gen2)
+				table->ACPIState.levels[0].gen2PCIE = 1;
+			else
+				table->ACPIState.levels[0].gen2PCIE = 0;
+		} else
+			table->ACPIState.levels[0].gen2PCIE = 0;
+		if (pi->acpi_pcie_gen2)
+			table->ACPIState.levels[0].gen2XSP = 1;
+		else
+			table->ACPIState.levels[0].gen2XSP = 0;
+	} else {
+		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
+					  &table->ACPIState.levels[0].vddc);
+		table->ACPIState.levels[0].gen2PCIE = 0;
+	}
+
+	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
+
+	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
+
+	mclk_pwrmgt_cntl = (MRDCKA0_RESET |
+			    MRDCKA1_RESET |
+			    MRDCKB0_RESET |
+			    MRDCKB1_RESET |
+			    MRDCKC0_RESET |
+			    MRDCKC1_RESET |
+			    MRDCKD0_RESET |
+			    MRDCKD1_RESET);
+
+	dll_cntl = 0xff000000;
+
+	spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+
+	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
+
+	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	table->ACPIState.levels[1] = table->ACPIState.levels[0];
+	table->ACPIState.levels[2] = table->ACPIState.levels[0];
+
+	return 0;
+}
+
+int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
+				      RV770_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
+	    (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) {
+		voltage->index = MVDD_LOW_INDEX;
+		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
+	} else {
+		voltage->index = MVDD_HIGH_INDEX;
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+	}
+
+	return 0;
+}
+
+static int rv770_populate_smc_initial_state(struct radeon_device *rdev,
+					    struct radeon_ps *radeon_state,
+					    RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 a_t;
+
+	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
+	table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
+
+	table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
+	table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
+		cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk770.mclk_value =
+		cpu_to_be32(initial_state->low.mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->low.sclk);
+
+	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
+
+	table->initialState.levels[0].seqValue =
+		rv770_get_seq_value(rdev, &initial_state->low);
+
+	rv770_populate_vddc_value(rdev,
+				  initial_state->low.vddc,
+				  &table->initialState.levels[0].vddc);
+	rv770_populate_initial_mvdd_value(rdev,
+					  &table->initialState.levels[0].mvdd);
+
+	a_t = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(a_t);
+
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+
+	if (pi->boot_in_gen2)
+		table->initialState.levels[0].gen2PCIE = 1;
+	else
+		table->initialState.levels[0].gen2PCIE = 0;
+	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+		table->initialState.levels[0].gen2XSP = 1;
+	else
+		table->initialState.levels[0].gen2XSP = 0;
+
+	if (rdev->family == CHIP_RV740) {
+		if (pi->mem_gddr5) {
+			if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold)
+				table->initialState.levels[0].strobeMode =
+					rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10;
+			else
+				table->initialState.levels[0].strobeMode = 0;
+
+			if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold)
+				table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
+			else
+				table->initialState.levels[0].mcFlags = 0;
+		}
+	}
+
+	table->initialState.levels[1] = table->initialState.levels[0];
+	table->initialState.levels[2] = table->initialState.levels[0];
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	return 0;
+}
+
+static int rv770_populate_smc_vddc_table(struct radeon_device *rdev,
+					 RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
+	for (i = 0; i < pi->valid_vddc_entries; i++) {
+		table->highSMIO[pi->vddc_table[i].vddc_index] =
+			pi->vddc_table[i].high_smio;
+		table->lowSMIO[pi->vddc_table[i].vddc_index] =
+			cpu_to_be32(pi->vddc_table[i].low_smio);
+	}
+
+	table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
+	table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
+		cpu_to_be32(pi->vddc_mask_low);
+
+	/* find the index of the first entry at or above the highest
+	 * voltage in the power-play table
+	 */
+	for (i = 0;
+	     ((i < pi->valid_vddc_entries) &&
+	      (pi->max_vddc_in_table >
+	       pi->vddc_table[i].vddc));
+	     i++)
+		;
+
+	table->maxVDDCIndexInPPTable =
+		pi->vddc_table[i].vddc_index;
+
+	return 0;
+}
+
+static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev,
+					 RV770_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (pi->mvdd_control) {
+		table->lowSMIO[MVDD_HIGH_INDEX] |=
+			cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]);
+		table->lowSMIO[MVDD_LOW_INDEX] |=
+			cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]);
+
+		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0;
+		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] =
+			cpu_to_be32(pi->mvdd_mask_low);
+	}
+
+	return 0;
+}
+
+static int rv770_init_smc_table(struct radeon_device *rdev,
+				struct radeon_ps *radeon_boot_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
+	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
+	int ret;
+
+	memset(table, 0, sizeof(RV770_SMC_STATETABLE));
+
+	pi->boot_sclk = boot_state->low.sclk;
+
+	rv770_populate_smc_vddc_table(rdev, table);
+	rv770_populate_smc_mvdd_table(rdev, table);
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	case THERMAL_TYPE_EXTERNAL_GPIO:
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) {
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT)
+			table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK;
+
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT)
+			table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	else
+		ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	if (ret)
+		return ret;
+
+	if (rdev->family == CHIP_RV740)
+		ret = rv740_populate_smc_acpi_state(rdev, table);
+	else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		ret = rv730_populate_smc_acpi_state(rdev, table);
+	else
+		ret = rv770_populate_smc_acpi_state(rdev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	return rv770_copy_bytes_to_smc(rdev,
+				       pi->state_table_start,
+				       (const u8 *)table,
+				       sizeof(RV770_SMC_STATETABLE),
+				       pi->sram_end);
+}
+
+static int rv770_construct_vddc_table(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u16 min, max, step;
+	u32 steps = 0;
+	u8 vddc_index = 0;
+	u32 i;
+
+	radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &min);
+	radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &max);
+	radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step);
+
+	steps = (max - min) / step + 1;
+
+	if (steps > MAX_NO_VREG_STEPS)
+		return -EINVAL;
+
+	for (i = 0; i < steps; i++) {
+		u32 gpio_pins, gpio_mask;
+
+		pi->vddc_table[i].vddc = (u16)(min + i * step);
+		radeon_atom_get_voltage_gpio_settings(rdev,
+						      pi->vddc_table[i].vddc,
+						      SET_VOLTAGE_TYPE_ASIC_VDDC,
+						      &gpio_pins, &gpio_mask);
+		pi->vddc_table[i].low_smio = gpio_pins & gpio_mask;
+		pi->vddc_table[i].high_smio = 0;
+		pi->vddc_mask_low = gpio_mask;
+		if (i > 0) {
+			if ((pi->vddc_table[i].low_smio !=
+			     pi->vddc_table[i - 1].low_smio) ||
+			    (pi->vddc_table[i].high_smio !=
+			     pi->vddc_table[i - 1].high_smio))
+				vddc_index++;
+		}
+		pi->vddc_table[i].vddc_index = vddc_index;
+	}
+
+	pi->valid_vddc_entries = (u8)steps;
+
+	return 0;
+}
+
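+/*
+ * The split point is in the driver's 10 kHz clock units, so 30000 means
+ * 300 MHz: at or below that memory clock a GDDR3 board can run on the low
+ * MVDD level (see rv770_populate_mvdd_value()).
+ */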
+static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info)
+{
+	if (memory_info->mem_type == MEM_TYPE_GDDR3)
+		return 30000;
+
+	return 0;
+}
+
+static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 gpio_pins, gpio_mask;
+
+	radeon_atom_get_voltage_gpio_settings(rdev,
+					      MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					      &gpio_pins, &gpio_mask);
+	pi->mvdd_mask_low = gpio_mask;
+	pi->mvdd_low_smio[MVDD_HIGH_INDEX] =
+		gpio_pins & gpio_mask;
+
+	radeon_atom_get_voltage_gpio_settings(rdev,
+					      MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					      &gpio_pins, &gpio_mask);
+	pi->mvdd_low_smio[MVDD_LOW_INDEX] =
+		gpio_pins & gpio_mask;
+
+	return 0;
+}
+
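+/* The VBIOS leaves the memory module index in bits 23:16 of BIOS_SCRATCH_4. */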
+u8 rv770_get_memory_module_index(struct radeon_device *rdev)
+{
+	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static int rv770_get_mvdd_configuration(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 memory_module_index;
+	struct atom_memory_info memory_info;
+
+	memory_module_index = rv770_get_memory_module_index(rdev);
+
+	if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) {
+		pi->mvdd_control = false;
+		return 0;
+	}
+
+	pi->mvdd_split_frequency =
+		rv770_get_mclk_split_point(&memory_info);
+
+	if (pi->mvdd_split_frequency == 0) {
+		pi->mvdd_control = false;
+		return 0;
+	}
+
+	return rv770_get_mvdd_pin_configuration(rdev);
+}
+
+void rv770_enable_voltage_control(struct radeon_device *rdev,
+				  bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static void rv770_program_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	if (rdev->pm.dpm.new_active_crtcs & 1) {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+	} else if (rdev->pm.dpm.new_active_crtcs & 2) {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
+	} else {
+		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
+	}
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
+					   bool enable)
+{
+	rv770_enable_bif_dynamic_pcie_gen2(rdev, enable);
+
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+}
+
+static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev,
+						  struct radeon_ps *radeon_new_state)
+{
+	if ((rdev->family == CHIP_RV730) ||
+	    (rdev->family == CHIP_RV710) ||
+	    (rdev->family == CHIP_RV740))
+		rv730_program_memory_timing_parameters(rdev, radeon_new_state);
+	else
+		rv770_program_memory_timing_parameters(rdev, radeon_new_state);
+}
+
+static int rv770_upload_sw_state(struct radeon_device *rdev,
+				 struct radeon_ps *radeon_new_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u16 address = pi->state_table_start +
+		offsetof(RV770_SMC_STATETABLE, driverState);
+	RV770_SMC_SWSTATE state = { 0 };
+	int ret;
+
+	ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state);
+	if (ret)
+		return ret;
+
+	return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state,
+				       sizeof(RV770_SMC_SWSTATE),
+				       pi->sram_end);
+}
+
+int rv770_halt_smc(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+int rv770_resume_smc(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK)
+		return -EINVAL;
+	return 0;
+}
+
+int rv770_set_sw_state(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
+		DRM_DEBUG("rv770_set_sw_state failed\n");
+	return 0;
+}
+
+int rv770_set_boot_state(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK)
+		return -EINVAL;
+	return 0;
+}
+
+void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+					      struct radeon_ps *new_ps,
+					      struct radeon_ps *old_ps)
+{
+	struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
+	struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->high.sclk >= current_state->high.sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+					     struct radeon_ps *new_ps,
+					     struct radeon_ps *old_ps)
+{
+	struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
+	struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->high.sclk < current_state->high.sclk)
+		return;
+
+	radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
+}
+
+int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev)
+{
+	if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+int rv770_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	PPSMC_Msg msg;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ZeroLevelsDisabled) != PPSMC_Result_OK)
+			return -EINVAL;
+		msg = PPSMC_MSG_ForceHigh;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+			return -EINVAL;
+		msg = (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled);
+	} else {
+		if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+			return -EINVAL;
+		msg = (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled);
+	}
+
+	if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+void r7xx_start_smc(struct radeon_device *rdev)
+{
+	rv770_start_smc(rdev);
+	rv770_start_smc_clock(rdev);
+}
+
+void r7xx_stop_smc(struct radeon_device *rdev)
+{
+	rv770_reset_smc(rdev);
+	rv770_stop_smc_clock(rdev);
+}
+
+static void rv770_read_clock_registers(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	pi->clk_regs.rv770.cg_spll_func_cntl =
+		RREG32(CG_SPLL_FUNC_CNTL);
+	pi->clk_regs.rv770.cg_spll_func_cntl_2 =
+		RREG32(CG_SPLL_FUNC_CNTL_2);
+	pi->clk_regs.rv770.cg_spll_func_cntl_3 =
+		RREG32(CG_SPLL_FUNC_CNTL_3);
+	pi->clk_regs.rv770.cg_spll_spread_spectrum =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
+		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+	pi->clk_regs.rv770.mpll_ad_func_cntl =
+		RREG32(MPLL_AD_FUNC_CNTL);
+	pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
+		RREG32(MPLL_AD_FUNC_CNTL_2);
+	pi->clk_regs.rv770.mpll_dq_func_cntl =
+		RREG32(MPLL_DQ_FUNC_CNTL);
+	pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
+		RREG32(MPLL_DQ_FUNC_CNTL_2);
+	pi->clk_regs.rv770.mclk_pwrmgt_cntl =
+		RREG32(MCLK_PWRMGT_CNTL);
+	pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
+}
+
+static void r7xx_read_clock_registers(struct radeon_device *rdev)
+{
+	if (rdev->family == CHIP_RV740)
+		rv740_read_clock_registers(rdev);
+	else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_read_clock_registers(rdev);
+	else
+		rv770_read_clock_registers(rdev);
+}
+
+void rv770_read_voltage_smio_registers(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	pi->s0_vid_lower_smio_cntl =
+		RREG32(S0_VID_LOWER_SMIO_CNTL);
+}
+
+void rv770_reset_smio_status(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 sw_smio_index, vid_smio_cntl;
+
+	sw_smio_index =
+		(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
+	switch (sw_smio_index) {
+	case 3:
+		vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
+		break;
+	case 2:
+		vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
+		break;
+	case 1:
+		vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
+		break;
+	case 0:
+		return;
+	default:
+		vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
+		break;
+	}
+
+	WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl);
+	WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK);
+}
+
+void rv770_get_memory_type(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+
+	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+	    MC_SEQ_MISC0_GDDR5_VALUE)
+		pi->mem_gddr5 = true;
+	else
+		pi->mem_gddr5 = false;
+}
+
+void rv770_get_pcie_gen2_status(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp;
+
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+		pi->pcie_gen2 = true;
+	else
+		pi->pcie_gen2 = false;
+
+	if (pi->pcie_gen2) {
+		if (tmp & LC_CURRENT_DATA_RATE)
+			pi->boot_in_gen2 = true;
+		else
+			pi->boot_in_gen2 = false;
+	} else
+		pi->boot_in_gen2 = false;
+}
+
+#if 0
+static int rv770_enter_ulp_state(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (pi->gfx_clock_gating) {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_TILING_CONFIG);
+	}
+
+	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
+		 ~HOST_SMC_MSG_MASK);
+
+	udelay(7000);
+
+	return 0;
+}
+
+static int rv770_exit_ulp_state(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
+	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower),
+		 ~HOST_SMC_MSG_MASK);
+
+	udelay(7000);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1)
+			break;
+		udelay(1000);
+	}
+
+	if (pi->gfx_clock_gating)
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+
+	return 0;
+}
+#endif
+
+static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 memory_module_index;
+	struct atom_memory_info memory_info;
+
+	pi->mclk_odt_threshold = 0;
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) {
+		memory_module_index = rv770_get_memory_module_index(rdev);
+
+		if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info))
+			return;
+
+		if (memory_info.mem_type == MEM_TYPE_DDR2 ||
+		    memory_info.mem_type == MEM_TYPE_DDR3)
+			pi->mclk_odt_threshold = 30000;
+	}
+}
+
+void rv770_get_max_vddc(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u16 vddc;
+
+	if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc))
+		pi->max_vddc = 0;
+	else
+		pi->max_vddc = vddc;
+}
+
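+/*
+ * The delays handed to the SMC appear to be in units of 16 reference-clock
+ * ticks: with times in microseconds and radeon_get_xclk() in 10 kHz units,
+ * t_us * xclk / 100 is a tick count, and the extra factor of 16 makes the
+ * divisor 1600.  E.g. 1000 us at a 100 MHz xclk (10000) -> 6250.
+ */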
+void rv770_program_response_times(struct radeon_device *rdev)
+{
+	u32 voltage_response_time, backbias_response_time;
+	u32 acpi_delay_time, vbi_time_out;
+	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly;
+	u32 reference_clock;
+
+	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
+	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+
+	if (voltage_response_time == 0)
+		voltage_response_time = 1000;
+
+	if (backbias_response_time == 0)
+		backbias_response_time = 1000;
+
+	acpi_delay_time = 15000;
+	vbi_time_out = 100000;
+
+	reference_clock = radeon_get_xclk(rdev);
+
+	vddc_dly = (voltage_response_time  * reference_clock) / 1600;
+	bb_dly = (backbias_response_time * reference_clock) / 1600;
+	acpi_dly = (acpi_delay_time * reference_clock) / 1600;
+	vbi_dly = (vbi_time_out * reference_clock) / 1600;
+
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
+	rv770_write_smc_soft_register(rdev,
+				      RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+#if 0
+	/* XXX look up hw revision */
+	if (WEKIVA_A21)
+		rv770_write_smc_soft_register(rdev,
+					      RV770_SMC_SOFT_REGISTER_baby_step_timer,
+					      0x10);
+#endif
+}
+
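+/*
+ * The mirrored early-return guards in this before/after pair mean the ODT
+ * change is applied before the switch when leaving the low-mclk range and
+ * after the switch when entering it, so it always seems to take effect
+ * while the lower memory clock is active.
+ */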
+static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev,
+						    struct radeon_ps *radeon_new_state,
+						    struct radeon_ps *radeon_current_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
+	struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
+	bool current_use_dc = false;
+	bool new_use_dc = false;
+
+	if (pi->mclk_odt_threshold == 0)
+		return;
+
+	if (current_state->high.mclk <= pi->mclk_odt_threshold)
+		current_use_dc = true;
+
+	if (new_state->high.mclk <= pi->mclk_odt_threshold)
+		new_use_dc = true;
+
+	if (current_use_dc == new_use_dc)
+		return;
+
+	if (!current_use_dc && new_use_dc)
+		return;
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_program_dcodt(rdev, new_use_dc);
+}
+
+static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev,
+						   struct radeon_ps *radeon_new_state,
+						   struct radeon_ps *radeon_current_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
+	struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
+	bool current_use_dc = false;
+	bool new_use_dc = false;
+
+	if (pi->mclk_odt_threshold == 0)
+		return;
+
+	if (current_state->high.mclk <= pi->mclk_odt_threshold)
+		current_use_dc = true;
+
+	if (new_state->high.mclk <= pi->mclk_odt_threshold)
+		new_use_dc = true;
+
+	if (current_use_dc == new_use_dc)
+		return;
+
+	if (current_use_dc && !new_use_dc)
+		return;
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_program_dcodt(rdev, new_use_dc);
+}
+
+static void rv770_retrieve_odt_values(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (pi->mclk_odt_threshold == 0)
+		return;
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_get_odt_values(rdev);
+}
+
+static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	bool want_thermal_protection;
+	enum radeon_dpm_event_src dpm_event_src;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+		break;
+
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+		break;
+
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; /* sic, spelled this way in radeon.h */
+		break;
+	}
+
+	if (want_thermal_protection) {
+		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+		if (pi->thermal_protection)
+			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	} else {
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+	}
+}
+
+void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
+				       enum radeon_dpm_auto_throttle_src source,
+				       bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
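+/*
+ * min_temp/max_temp are in millidegrees Celsius; the DIG_THERM fields take
+ * whole degrees, and the requested window is clamped to the supported
+ * 0..255 C range.
+ */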
+static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
+					       int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+int rv770_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
+	if (pi->gfx_clock_gating)
+		rv770_restore_cgcg(rdev);
+
+	if (rv770_dpm_enabled(rdev))
+		return -EINVAL;
+
+	if (pi->voltage_control) {
+		rv770_enable_voltage_control(rdev, true);
+		ret = rv770_construct_vddc_table(rdev);
+		if (ret) {
+			DRM_ERROR("rv770_construct_vddc_table failed\n");
+			return ret;
+		}
+	}
+
+	if (pi->dcodt)
+		rv770_retrieve_odt_values(rdev);
+
+	if (pi->mvdd_control) {
+		ret = rv770_get_mvdd_configuration(rdev);
+		if (ret) {
+			DRM_ERROR("rv770_get_mvdd_configuration failed\n");
+			return ret;
+		}
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
+		rv770_enable_backbias(rdev, true);
+
+	rv770_enable_spread_spectrum(rdev, true);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, true);
+
+	rv770_program_mpll_timing_parameters(rdev);
+	rv770_setup_bsp(rdev);
+	rv770_program_git(rdev);
+	rv770_program_tp(rdev);
+	rv770_program_tpp(rdev);
+	rv770_program_sstp(rdev);
+	rv770_program_engine_speed_parameters(rdev);
+	rv770_enable_display_gap(rdev);
+	rv770_program_vc(rdev);
+
+	if (pi->dynamic_pcie_gen2)
+		rv770_enable_dynamic_pcie_gen2(rdev, true);
+
+	ret = rv770_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_upload_firmware failed\n");
+		return ret;
+	}
+	ret = rv770_init_smc_table(rdev, boot_ps);
+	if (ret) {
+		DRM_ERROR("rv770_init_smc_table failed\n");
+		return ret;
+	}
+
+	rv770_program_response_times(rdev);
+	r7xx_start_smc(rdev);
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_start_dpm(rdev);
+	else
+		rv770_start_dpm(rdev);
+
+	if (pi->gfx_clock_gating)
+		rv770_gfx_clock_gating_enable(rdev, true);
+
+	if (pi->mg_clock_gating)
+		rv770_mg_clock_gating_enable(rdev, true);
+
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	return 0;
+}
+
+int rv770_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		PPSMC_Result result;
+
+		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret)
+			return ret;
+		rdev->irq.dpm_thermal = true;
+		radeon_irq_set(rdev);
+		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+
+		if (result != PPSMC_Result_OK)
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+	}
+
+	return 0;
+}
+
+void rv770_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (!rv770_dpm_enabled(rdev))
+		return;
+
+	rv770_clear_vc(rdev);
+
+	if (pi->thermal_protection)
+		rv770_enable_thermal_protection(rdev, false);
+
+	rv770_enable_spread_spectrum(rdev, false);
+
+	if (pi->dynamic_pcie_gen2)
+		rv770_enable_dynamic_pcie_gen2(rdev, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	if (pi->gfx_clock_gating)
+		rv770_gfx_clock_gating_enable(rdev, false);
+
+	if (pi->mg_clock_gating)
+		rv770_mg_clock_gating_enable(rdev, false);
+
+	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
+		rv730_stop_dpm(rdev);
+	else
+		rv770_stop_dpm(rdev);
+
+	r7xx_stop_smc(rdev);
+	rv770_reset_smio_status(rdev);
+}
+
+int rv770_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
+	int ret;
+
+	ret = rv770_restrict_performance_levels_before_switch(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	ret = rv770_halt_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_halt_smc failed\n");
+		return ret;
+	}
+	ret = rv770_upload_sw_state(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("rv770_upload_sw_state failed\n");
+		return ret;
+	}
+	r7xx_program_memory_timing_parameters(rdev, new_ps);
+	if (pi->dcodt)
+		rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps);
+	ret = rv770_resume_smc(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_resume_smc failed\n");
+		return ret;
+	}
+	ret = rv770_set_sw_state(rdev);
+	if (ret) {
+		DRM_ERROR("rv770_set_sw_state failed\n");
+		return ret;
+	}
+	if (pi->dcodt)
+		rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
+	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+#if 0
+void rv770_dpm_reset_asic(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	rv770_restrict_performance_levels_before_switch(rdev);
+	if (pi->dcodt)
+		rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps);
+	rv770_set_boot_state(rdev);
+	if (pi->dcodt)
+		rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps);
+}
+#endif
+
+void rv770_dpm_setup_asic(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	r7xx_read_clock_registers(rdev);
+	rv770_read_voltage_smio_registers(rdev);
+	rv770_get_memory_type(rdev);
+	if (pi->dcodt)
+		rv770_get_mclk_odt_threshold(rdev);
+	rv770_get_pcie_gen2_status(rdev);
+
+	rv770_enable_acpi_pm(rdev);
+
+	if (radeon_aspm != 0) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
+			rv770_enable_l0s(rdev);
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
+			rv770_enable_l1(rdev);
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
+			rv770_enable_pll_sleep_in_l1(rdev);
+	}
+}
+
+void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	rv770_program_display_gap(rdev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					     struct radeon_ps *rps,
+					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					     u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (r600_is_uvd_state(rps->class, rps->class2)) {
+		if ((rps->vclk == 0) || (rps->dclk == 0)) {
+			rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+			rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+		}
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
+					 struct radeon_ps *rps, int index,
+					 union pplib_clock_info *clock_info)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	u32 sclk, mclk;
+	struct rv7xx_pl *pl;
+
+	switch (index) {
+	case 0:
+		pl = &ps->low;
+		break;
+	case 1:
+		pl = &ps->medium;
+		break;
+	case 2:
+	default:
+		pl = &ps->high;
+		break;
+	}
+
+	if (rdev->family >= CHIP_CEDAR) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+
+		pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
+		pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
+		pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+
+		pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
+		pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
+	}
+
+	pl->mclk = mclk;
+	pl->sclk = sclk;
+
+	/* patch up vddc if necessary */
+	if (pl->vddc == 0xff01) {
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_vddc = pl->vddc;
+		if (rdev->family >= CHIP_CEDAR)
+			eg_pi->acpi_vddci = pl->vddci;
+		if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
+			pi->acpi_pcie_gen2 = true;
+		else
+			pi->acpi_pcie_gen2 = false;
+	}
+
+	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+		if (rdev->family >= CHIP_BARTS) {
+			eg_pi->ulv.supported = true;
+			eg_pi->ulv.pl = pl;
+		}
+	}
+
+	if (pi->min_vddc_in_table > pl->vddc)
+		pi->min_vddc_in_table = pl->vddc;
+
+	if (pi->max_vddc_in_table < pl->vddc)
+		pi->max_vddc_in_table = pl->vddc;
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+		pl->mclk = rdev->clock.default_mclk;
+		pl->sclk = rdev->clock.default_sclk;
+		pl->vddc = vddc;
+		pl->vddci = vddci;
+	}
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+	}
+}
+
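+/*
+ * Walk the ATOM PPLib layout: a state array in which each entry references
+ * one non-clock-info block plus (ucStateEntrySize - 1) clock-info indices,
+ * which map onto the low/medium/high performance levels parsed above.
+ */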
+int rv7xx_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	union pplib_clock_info *clock_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	struct rv7xx_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		/* each state entry carries (ucStateEntrySize - 1) clock-info indices */
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			u8 *idx;
+			ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
+			if (ps == NULL) {
+				kfree(rdev->pm.dpm.ps);
+				return -ENOMEM;
+			}
+			rdev->pm.dpm.ps[i].ps_priv = ps;
+			rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+							 non_clock_info,
+							 power_info->pplib.ucNonClockSize);
+			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (idx[j] * power_info->pplib.ucClockInfoSize));
+				rv7xx_parse_pplib_clock_info(rdev,
+							     &rdev->pm.dpm.ps[i], j,
+							     clock_info);
+			}
+		}
+	}
+	rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
+	return 0;
+}
+
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_atom_ss ss;
+
+	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	if (pi->sclk_ss || pi->mclk_ss)
+		pi->dynamic_ss = true;
+	else
+		pi->dynamic_ss = false;
+}
+
+int rv770_dpm_init(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	rv770_get_max_vddc(rdev);
+
+	pi->acpi_vddc = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = rv7xx_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	/* prefer the VBIOS-reported reference divider; fall back to the
+	 * default only if the query fails
+	 */
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+	else
+		pi->ref_div = dividers.ref_div + 1;
+
+	pi->mclk_strobe_mode_threshold = 30000;
+	pi->mclk_edc_enable_threshold = 30000;
+
+	pi->rlp = RV770_RLP_DFLT;
+	pi->rmp = RV770_RMP_DFLT;
+	pi->lhp = RV770_LHP_DFLT;
+	pi->lmp = RV770_LMP_DFLT;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
+
+	pi->mvdd_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
+
+	rv770_get_engine_memory_ss(rdev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = RV770_HASI_DFLT;
+	pi->vrc = RV770_VRC_DFLT;
+
+	pi->power_gating = false;
+
+	pi->gfx_clock_gating = true;
+
+	pi->mg_clock_gating = true;
+	pi->mgcgtssm = true;
+
+	pi->dynamic_pcie_gen2 = true;
+
+	pi->thermal_protection = rdev->pm.int_thermal_type != THERMAL_TYPE_NONE;
+
+	pi->display_gap = true;
+
+	pi->dcodt = !!(rdev->flags & RADEON_IS_MOBILITY);
+
+	pi->ulps = true;
+
+	pi->mclk_stutter_mode_threshold = 0;
+
+	pi->sram_end = SMC_RAM_END;
+	pi->state_table_start = RV770_SMC_TABLE_ADDRESS;
+	pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START;
+
+	return 0;
+}
+
+void rv770_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	if (rdev->family >= CHIP_CEDAR) {
+		pl = &ps->low;
+		printk("\t\tpower level 0    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+		pl = &ps->medium;
+		printk("\t\tpower level 1    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+		pl = &ps->high;
+		printk("\t\tpower level 2    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	} else {
+		pl = &ps->low;
+		printk("\t\tpower level 0    sclk: %u mclk: %u vddc: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc);
+		pl = &ps->medium;
+		printk("\t\tpower level 1    sclk: %u mclk: %u vddc: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc);
+		pl = &ps->high;
+		printk("\t\tpower level 2    sclk: %u mclk: %u vddc: %u\n",
+		       pl->sclk, pl->mclk, pl->vddc);
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
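+/*
+ * The active performance level is read back from
+ * TARGET_AND_CURRENT_PROFILE_INDEX: index 0/1/2 selects the low/medium/
+ * high level of the current power state.
+ */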
+void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		seq_printf(m, "invalid dpm profile %u\n", current_index);
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		if (rdev->family >= CHIP_CEDAR) {
+			seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+		} else {
+			seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc);
+		}
+	}
+}
+
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->sclk;
+	}
+}
+
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		return 0;
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		return pl->mclk;
+	}
+}
+
+void rv770_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+}
+
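+/*
+ * Clock reporting helpers: return the engine/memory clock of the lowest
+ * or highest performance level of the requested power state.
+ */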
+u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
+
+	if (low)
+		return requested_state->low.sclk;
+	else
+		return requested_state->high.sclk;
+}
+
+u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
+
+	if (low)
+		return requested_state->low.mclk;
+	else
+		return requested_state->high.mclk;
+}
+
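+/*
+ * Memory reclocking has to complete within the vblank window to avoid
+ * visible artifacts, so mclk switching is only allowed when the measured
+ * vblank time (in us) exceeds the switch threshold.
+ */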
+bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+	u32 switch_limit = 200; /* 300 */
+
+	/* RV770 */
+	/* mclk switching doesn't seem to work reliably on desktop RV770s */
+	if ((rdev->family == CHIP_RV770) &&
+	    !(rdev->flags & RADEON_IS_MOBILITY))
+		switch_limit = 0xffffffff; /* disable mclk switching */
+
+	return vblank_time < switch_limit;
+}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
new file mode 100644
index 0000000..d12beab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RV770_DPM_H__
+#define __RV770_DPM_H__
+
+#include "rv770_smc.h"
+
+struct rv770_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl2;
+	u32 mpll_func_cntl3;
+	u32 mpll_ss;
+	u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+	struct rv770_clock_registers rv770;
+	struct rv730_clock_registers rv730;
+};
+
+struct vddc_table_entry {
+	u16 vddc;
+	u8 vddc_index;
+	u8 high_smio;
+	u32 low_smio;
+};
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+
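+/*
+ * Driver-private dpm state for the rv7xx family; allocated in
+ * rv770_dpm_init() and hung off rdev->pm.dpm.priv (see rv770_get_pi()).
+ */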
+struct rv7xx_power_info {
+	/* flags */
+	bool mem_gddr5;
+	bool pcie_gen2;
+	bool dynamic_pcie_gen2;
+	bool acpi_pcie_gen2;
+	bool boot_in_gen2;
+	bool voltage_control; /* vddc */
+	bool mvdd_control;
+	bool sclk_ss;
+	bool mclk_ss;
+	bool dynamic_ss;
+	bool gfx_clock_gating;
+	bool mg_clock_gating;
+	bool mgcgtssm;
+	bool power_gating;
+	bool thermal_protection;
+	bool display_gap;
+	bool dcodt;
+	bool ulps;
+	/* registers */
+	union r7xx_clock_registers clk_regs;
+	u32 s0_vid_lower_smio_cntl;
+	/* voltage */
+	u32 vddc_mask_low;
+	u32 mvdd_mask_low;
+	u32 mvdd_split_frequency;
+	u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+	u16 max_vddc;
+	u16 max_vddc_in_table;
+	u16 min_vddc_in_table;
+	struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+	u8 valid_vddc_entries;
+	/* dc odt */
+	u32 mclk_odt_threshold;
+	u8 odt_value_0[2];
+	u8 odt_value_1[2];
+	/* stored values */
+	u32 boot_sclk;
+	u16 acpi_vddc;
+	u32 ref_div;
+	u32 active_auto_throttle_sources;
+	u32 mclk_stutter_mode_threshold;
+	u32 mclk_strobe_mode_threshold;
+	u32 mclk_edc_enable_threshold;
+	u32 bsp;
+	u32 bsu;
+	u32 pbsp;
+	u32 pbsu;
+	u32 dsp;
+	u32 psp;
+	u32 asi;
+	u32 pasi;
+	u32 vrc;
+	u32 restricted_levels;
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+	/* smc offsets */
+	u16 state_table_start;
+	u16 soft_regs_start;
+	u16 sram_end;
+	/* scratch structs */
+	RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci; /* eg+ only */
+	u32 flags;
+	enum radeon_pcie_gen pcie_gen; /* si+ only */
+};
+
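+/*
+ * An rv7xx power state is made up of three performance levels, matching
+ * RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE.
+ */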
+struct rv7xx_ps {
+	struct rv7xx_pl high;
+	struct rv7xx_pl medium;
+	struct rv7xx_pl low;
+	bool dc_compatible;
+};
+
+#define RV770_RLP_DFLT                                10
+#define RV770_RMP_DFLT                                25
+#define RV770_LHP_DFLT                                25
+#define RV770_LMP_DFLT                                10
+#define RV770_VRC_DFLT                                0x003f
+#define RV770_ASI_DFLT                                1000
+#define RV770_HASI_DFLT                               200000
+#define RV770_MGCGTTLOCAL0_DFLT                       0x00100000
+#define RV7XX_MGCGTTLOCAL0_DFLT                       0
+#define RV770_MGCGTTLOCAL1_DFLT                       0xFFFF0000
+#define RV770_MGCGCGTSSMCTRL_DFLT                     0x55940000
+
+#define MVDD_LOW_INDEX  0
+#define MVDD_HIGH_INDEX 1
+
+#define MVDD_LOW_VALUE  0
+#define MVDD_HIGH_VALUE 0xffff
+
+#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 kHz units */
+#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 kHz units */
+
+/* rv730/rv710 */
+int rv730_populate_sclk_value(struct radeon_device *rdev,
+			      u32 engine_clock,
+			      RV770_SMC_SCLK_VALUE *sclk);
+int rv730_populate_mclk_value(struct radeon_device *rdev,
+			      u32 engine_clock, u32 memory_clock,
+			      LPRV7XX_SMC_MCLK_VALUE mclk);
+void rv730_read_clock_registers(struct radeon_device *rdev);
+int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
+				  RV770_SMC_STATETABLE *table);
+int rv730_populate_smc_initial_state(struct radeon_device *rdev,
+				     struct radeon_ps *radeon_initial_state,
+				     RV770_SMC_STATETABLE *table);
+void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
+					    struct radeon_ps *radeon_state);
+void rv730_power_gating_enable(struct radeon_device *rdev,
+			       bool enable);
+void rv730_start_dpm(struct radeon_device *rdev);
+void rv730_stop_dpm(struct radeon_device *rdev);
+void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt);
+void rv730_get_odt_values(struct radeon_device *rdev);
+
+/* rv740 */
+int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
+			      RV770_SMC_SCLK_VALUE *sclk);
+int rv740_populate_mclk_value(struct radeon_device *rdev,
+			      u32 engine_clock, u32 memory_clock,
+			      RV7XX_SMC_MCLK_VALUE *mclk);
+void rv740_read_clock_registers(struct radeon_device *rdev);
+int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
+				  RV770_SMC_STATETABLE *table);
+void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
+				       bool enable);
+u8 rv740_get_mclk_frequency_ratio(u32 memory_clock);
+u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock);
+u32 rv740_get_decoded_reference_divider(u32 encoded_ref);
+
+/* rv770 */
+u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
+int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
+			      RV770_SMC_VOLTAGE_VALUE *voltage);
+int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+			      RV770_SMC_VOLTAGE_VALUE *voltage);
+u8 rv770_get_seq_value(struct radeon_device *rdev,
+		       struct rv7xx_pl *pl);
+int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
+				      RV770_SMC_VOLTAGE_VALUE *voltage);
+u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
+					u32 engine_clock);
+void rv770_program_response_times(struct radeon_device *rdev);
+int rv770_populate_smc_sp(struct radeon_device *rdev,
+			  struct radeon_ps *radeon_state,
+			  RV770_SMC_SWSTATE *smc_state);
+int rv770_populate_smc_t(struct radeon_device *rdev,
+			 struct radeon_ps *radeon_state,
+			 RV770_SMC_SWSTATE *smc_state);
+void rv770_read_voltage_smio_registers(struct radeon_device *rdev);
+void rv770_get_memory_type(struct radeon_device *rdev);
+void r7xx_start_smc(struct radeon_device *rdev);
+u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+void rv770_get_max_vddc(struct radeon_device *rdev);
+void rv770_get_pcie_gen2_status(struct radeon_device *rdev);
+void rv770_enable_acpi_pm(struct radeon_device *rdev);
+void rv770_restore_cgcg(struct radeon_device *rdev);
+bool rv770_dpm_enabled(struct radeon_device *rdev);
+void rv770_enable_voltage_control(struct radeon_device *rdev,
+				  bool enable);
+void rv770_enable_backbias(struct radeon_device *rdev,
+			   bool enable);
+void rv770_enable_thermal_protection(struct radeon_device *rdev,
+				     bool enable);
+void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
+				       enum radeon_dpm_auto_throttle_src source,
+				       bool enable);
+void rv770_setup_bsp(struct radeon_device *rdev);
+void rv770_program_git(struct radeon_device *rdev);
+void rv770_program_tp(struct radeon_device *rdev);
+void rv770_program_tpp(struct radeon_device *rdev);
+void rv770_program_sstp(struct radeon_device *rdev);
+void rv770_program_engine_speed_parameters(struct radeon_device *rdev);
+void rv770_program_vc(struct radeon_device *rdev);
+void rv770_clear_vc(struct radeon_device *rdev);
+int rv770_upload_firmware(struct radeon_device *rdev);
+void rv770_stop_dpm(struct radeon_device *rdev);
+void r7xx_stop_smc(struct radeon_device *rdev);
+void rv770_reset_smio_status(struct radeon_device *rdev);
+int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev);
+int rv770_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
+int rv770_halt_smc(struct radeon_device *rdev);
+int rv770_resume_smc(struct radeon_device *rdev);
+int rv770_set_sw_state(struct radeon_device *rdev);
+int rv770_set_boot_state(struct radeon_device *rdev);
+int rv7xx_parse_power_table(struct radeon_device *rdev);
+void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+					      struct radeon_ps *new_ps,
+					      struct radeon_ps *old_ps);
+void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+					     struct radeon_ps *new_ps,
+					     struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
+
+/* smc */
+int rv770_write_smc_soft_register(struct radeon_device *rdev,
+				  u16 reg_offset, u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
new file mode 100644
index 0000000..2b7ddee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "rv770d.h"
+#include "rv770_dpm.h"
+#include "rv770_smc.h"
+#include "atom.h"
+#include "radeon_ucode.h"
+
+#define FIRST_SMC_INT_VECT_REG 0xFFD8
+#define FIRST_INT_VECT_S19     0xFFC0
+
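+/*
+ * Per-ASIC images of the SMC interrupt vector area.  These are written
+ * big-endian into SMC address space at FIRST_SMC_INT_VECT_REG by
+ * rv770_program_interrupt_vectors(); each byte pair presumably encodes a
+ * 16-bit handler address inside the matching SMC ucode image.
+ */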
+static const u8 rv770_smc_int_vectors[] =
+{
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x0C, 0xD7,
+	0x08, 0x2B, 0x08, 0x10,
+	0x03, 0x51, 0x03, 0x51,
+	0x03, 0x51, 0x03, 0x51
+};
+
+static const u8 rv730_smc_int_vectors[] =
+{
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x08, 0x15,
+	0x08, 0x15, 0x0C, 0xBB,
+	0x08, 0x30, 0x08, 0x15,
+	0x03, 0x56, 0x03, 0x56,
+	0x03, 0x56, 0x03, 0x56
+};
+
+static const u8 rv710_smc_int_vectors[] =
+{
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x08, 0x04,
+	0x08, 0x04, 0x0C, 0xCB,
+	0x08, 0x1F, 0x08, 0x04,
+	0x03, 0x51, 0x03, 0x51,
+	0x03, 0x51, 0x03, 0x51
+};
+
+static const u8 rv740_smc_int_vectors[] =
+{
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x08, 0x10,
+	0x08, 0x10, 0x0C, 0xD7,
+	0x08, 0x2B, 0x08, 0x10,
+	0x03, 0x51, 0x03, 0x51,
+	0x03, 0x51, 0x03, 0x51
+};
+
+static const u8 cedar_smc_int_vectors[] =
+{
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x11, 0x8B,
+	0x0B, 0x20, 0x0B, 0x05,
+	0x04, 0xF6, 0x04, 0xF6,
+	0x04, 0xF6, 0x04, 0xF6
+};
+
+static const u8 redwood_smc_int_vectors[] =
+{
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x11, 0x8B,
+	0x0B, 0x20, 0x0B, 0x05,
+	0x04, 0xF6, 0x04, 0xF6,
+	0x04, 0xF6, 0x04, 0xF6
+};
+
+static const u8 juniper_smc_int_vectors[] =
+{
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x11, 0x8B,
+	0x0B, 0x20, 0x0B, 0x05,
+	0x04, 0xF6, 0x04, 0xF6,
+	0x04, 0xF6, 0x04, 0xF6
+};
+
+static const u8 cypress_smc_int_vectors[] =
+{
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x0B, 0x05,
+	0x0B, 0x05, 0x11, 0x8B,
+	0x0B, 0x20, 0x0B, 0x05,
+	0x04, 0xF6, 0x04, 0xF6,
+	0x04, 0xF6, 0x04, 0xF6
+};
+
+static const u8 barts_smc_int_vectors[] =
+{
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x12, 0xAA,
+	0x0C, 0x2F, 0x15, 0xF6,
+	0x15, 0xF6, 0x05, 0x0A,
+	0x05, 0x0A, 0x05, 0x0A
+};
+
+static const u8 turks_smc_int_vectors[] =
+{
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x12, 0xAA,
+	0x0C, 0x2F, 0x15, 0xF6,
+	0x15, 0xF6, 0x05, 0x0A,
+	0x05, 0x0A, 0x05, 0x0A
+};
+
+static const u8 caicos_smc_int_vectors[] =
+{
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x0C, 0x14,
+	0x0C, 0x14, 0x12, 0xAA,
+	0x0C, 0x2F, 0x15, 0xF6,
+	0x15, 0xF6, 0x05, 0x0A,
+	0x05, 0x0A, 0x05, 0x0A
+};
+
+static const u8 cayman_smc_int_vectors[] =
+{
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x12, 0x05,
+	0x12, 0x05, 0x18, 0xEA,
+	0x12, 0x20, 0x1C, 0x34,
+	0x1C, 0x34, 0x08, 0x72,
+	0x08, 0x72, 0x08, 0x72
+};
+
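+/*
+ * SMC SRAM is accessed through an index/data pair: latch a dword-aligned,
+ * in-range address (with auto-increment disabled) in SMC_SRAM_ADDR, then
+ * access the payload through SMC_SRAM_DATA.
+ */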
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+				      u16 smc_address, u16 limit)
+{
+	u32 addr;
+
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	addr = smc_address;
+	addr |= SMC_SRAM_AUTO_INC_DIS;
+
+	WREG32(SMC_SRAM_ADDR, addr);
+
+	return 0;
+}
+
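+/*
+ * Copy a byte stream into SMC SRAM.  Whole dwords are packed big-endian
+ * and written directly; a partial tail is merged into the existing dword
+ * with a read-modify-write.  Worked example: copying 6 bytes writes one
+ * full dword, then for the 2 remaining bytes extra_shift = 8 * (4 - 2) =
+ * 16, so the source bytes land in the high half-word and the low 16 bits
+ * of the original dword are preserved.
+ */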
+int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
+			    u16 smc_start_address, const u8 *src,
+			    u16 byte_count, u16 limit)
+{
+	unsigned long flags;
+	u32 data, original_data, extra_shift;
+	u16 addr;
+	int ret = 0;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = rv770_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_SRAM_DATA, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW for final bytes */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = rv770_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(SMC_SRAM_DATA);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* SMC address space is BE */
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = rv770_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_SRAM_DATA, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
+
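+/*
+ * Write an interrupt vector image into SMC address space.  Bytes that
+ * would land below FIRST_SMC_INT_VECT_REG are skipped; the remainder is
+ * packed big-endian and written through the SMC_ISR_FFD8_FFDB window.
+ */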
+static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
+					   u32 smc_first_vector, const u8 *src,
+					   u32 byte_count)
+{
+	u32 tmp, i;
+
+	if (byte_count % 4)
+		return -EINVAL;
+
+	if (smc_first_vector < FIRST_SMC_INT_VECT_REG) {
+		tmp = FIRST_SMC_INT_VECT_REG - smc_first_vector;
+
+		if (tmp > byte_count)
+			return 0;
+
+		byte_count -= tmp;
+		src += tmp;
+		smc_first_vector = FIRST_SMC_INT_VECT_REG;
+	}
+
+	for (i = 0; i < byte_count; i += 4) {
+		/* SMC address space is BE */
+		tmp = (src[i] << 24) | (src[i + 1] << 16) | (src[i + 2] << 8) | src[i + 3];
+
+		WREG32(SMC_ISR_FFD8_FFDB + i, tmp);
+	}
+
+	return 0;
+}
+
+void rv770_start_smc(struct radeon_device *rdev)
+{
+	WREG32_P(SMC_IO, SMC_RST_N, ~SMC_RST_N);
+}
+
+void rv770_reset_smc(struct radeon_device *rdev)
+{
+	WREG32_P(SMC_IO, 0, ~SMC_RST_N);
+}
+
+void rv770_stop_smc_clock(struct radeon_device *rdev)
+{
+	WREG32_P(SMC_IO, 0, ~SMC_CLK_EN);
+}
+
+void rv770_start_smc_clock(struct radeon_device *rdev)
+{
+	WREG32_P(SMC_IO, SMC_CLK_EN, ~SMC_CLK_EN);
+}
+
+bool rv770_is_smc_running(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(SMC_IO);
+
+	return (tmp & SMC_RST_N) && (tmp & SMC_CLK_EN);
+}
+
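+/*
+ * Mailbox handshake with the SMC: place the message in the HOST_SMC_MSG
+ * field of SMC_MSG, then poll the HOST_SMC_RESP field until the SMC
+ * posts a non-zero result or the usec timeout expires.
+ */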
+PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+	PPSMC_Result result;
+
+	if (!rv770_is_smc_running(rdev))
+		return PPSMC_Result_Failed;
+
+	WREG32_P(SMC_MSG, HOST_SMC_MSG(msg), ~HOST_SMC_MSG_MASK);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
+		tmp >>= HOST_SMC_RESP_SHIFT;
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
+	tmp >>= HOST_SMC_RESP_SHIFT;
+
+	result = (PPSMC_Result)tmp;
+	return result;
+}
+
+PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+	int i;
+	PPSMC_Result result = PPSMC_Result_OK;
+
+	if (!rv770_is_smc_running(rdev))
+		return result;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SMC_IO) & SMC_STOP_MODE)
+			break;
+		udelay(1);
+	}
+
+	return result;
+}
+
+static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
+{
+	unsigned long flags;
+	u16 i;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	for (i = 0;  i < limit; i += 4) {
+		rv770_set_smc_sram_address(rdev, i, limit);
+		WREG32(SMC_SRAM_DATA, 0);
+	}
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+}
+
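+/*
+ * Upload the SMC firmware: clear SRAM up to the caller-supplied limit,
+ * select the per-ASIC ucode start/size and interrupt vector table, then
+ * copy the image and program the vectors.
+ */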
+int rv770_load_smc_ucode(struct radeon_device *rdev,
+			 u16 limit)
+{
+	int ret;
+	const u8 *int_vect;
+	u16 int_vect_start_address;
+	u16 int_vect_size;
+	const u8 *ucode_data;
+	u16 ucode_start_address;
+	u16 ucode_size;
+
+	if (!rdev->smc_fw)
+		return -EINVAL;
+
+	rv770_clear_smc_sram(rdev, limit);
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+		ucode_start_address = RV770_SMC_UCODE_START;
+		ucode_size = RV770_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&rv770_smc_int_vectors;
+		int_vect_start_address = RV770_SMC_INT_VECTOR_START;
+		int_vect_size = RV770_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_RV730:
+		ucode_start_address = RV730_SMC_UCODE_START;
+		ucode_size = RV730_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&rv730_smc_int_vectors;
+		int_vect_start_address = RV730_SMC_INT_VECTOR_START;
+		int_vect_size = RV730_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_RV710:
+		ucode_start_address = RV710_SMC_UCODE_START;
+		ucode_size = RV710_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&rv710_smc_int_vectors;
+		int_vect_start_address = RV710_SMC_INT_VECTOR_START;
+		int_vect_size = RV710_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_RV740:
+		ucode_start_address = RV740_SMC_UCODE_START;
+		ucode_size = RV740_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&rv740_smc_int_vectors;
+		int_vect_start_address = RV740_SMC_INT_VECTOR_START;
+		int_vect_size = RV740_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_CEDAR:
+		ucode_start_address = CEDAR_SMC_UCODE_START;
+		ucode_size = CEDAR_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&cedar_smc_int_vectors;
+		int_vect_start_address = CEDAR_SMC_INT_VECTOR_START;
+		int_vect_size = CEDAR_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_REDWOOD:
+		ucode_start_address = REDWOOD_SMC_UCODE_START;
+		ucode_size = REDWOOD_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&redwood_smc_int_vectors;
+		int_vect_start_address = REDWOOD_SMC_INT_VECTOR_START;
+		int_vect_size = REDWOOD_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_JUNIPER:
+		ucode_start_address = JUNIPER_SMC_UCODE_START;
+		ucode_size = JUNIPER_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&juniper_smc_int_vectors;
+		int_vect_start_address = JUNIPER_SMC_INT_VECTOR_START;
+		int_vect_size = JUNIPER_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		ucode_start_address = CYPRESS_SMC_UCODE_START;
+		ucode_size = CYPRESS_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&cypress_smc_int_vectors;
+		int_vect_start_address = CYPRESS_SMC_INT_VECTOR_START;
+		int_vect_size = CYPRESS_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_BARTS:
+		ucode_start_address = BARTS_SMC_UCODE_START;
+		ucode_size = BARTS_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&barts_smc_int_vectors;
+		int_vect_start_address = BARTS_SMC_INT_VECTOR_START;
+		int_vect_size = BARTS_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_TURKS:
+		ucode_start_address = TURKS_SMC_UCODE_START;
+		ucode_size = TURKS_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&turks_smc_int_vectors;
+		int_vect_start_address = TURKS_SMC_INT_VECTOR_START;
+		int_vect_size = TURKS_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_CAICOS:
+		ucode_start_address = CAICOS_SMC_UCODE_START;
+		ucode_size = CAICOS_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&caicos_smc_int_vectors;
+		int_vect_start_address = CAICOS_SMC_INT_VECTOR_START;
+		int_vect_size = CAICOS_SMC_INT_VECTOR_SIZE;
+		break;
+	case CHIP_CAYMAN:
+		ucode_start_address = CAYMAN_SMC_UCODE_START;
+		ucode_size = CAYMAN_SMC_UCODE_SIZE;
+		int_vect = (const u8 *)&cayman_smc_int_vectors;
+		int_vect_start_address = CAYMAN_SMC_INT_VECTOR_START;
+		int_vect_size = CAYMAN_SMC_INT_VECTOR_SIZE;
+		break;
+	default:
+		DRM_ERROR("unknown asic in smc ucode loader\n");
+		BUG();
+	}
+
+	/* load the ucode */
+	ucode_data = (const u8 *)rdev->smc_fw->data;
+	ret = rv770_copy_bytes_to_smc(rdev, ucode_start_address,
+				      ucode_data, ucode_size, limit);
+	if (ret)
+		return ret;
+
+	/* set up the int vectors */
+	ret = rv770_program_interrupt_vectors(rdev, int_vect_start_address,
+					      int_vect, int_vect_size);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int rv770_read_smc_sram_dword(struct radeon_device *rdev,
+			      u16 smc_address, u32 *value, u16 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(SMC_SRAM_DATA);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int rv770_write_smc_sram_dword(struct radeon_device *rdev,
+			       u16 smc_address, u32 value, u16 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		WREG32(SMC_SRAM_DATA, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
new file mode 100644
index 0000000..3b2c963
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RV770_SMC_H__
+#define __RV770_SMC_H__
+
+#include "ppsmc.h"
+
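+/*
+ * These structures are copied byte-for-byte into SMC SRAM by
+ * rv770_copy_bytes_to_smc(), so they must stay tightly packed and match
+ * the layout the SMC firmware expects.
+ */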
+#pragma pack(push, 1)
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
+
+struct RV770_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+
+struct RV730_SMC_MCLK_VALUE
+{
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL2;
+    uint32_t        vMPLL_FUNC_CNTL3;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+    RV770_SMC_MCLK_VALUE    mclk770;
+    RV730_SMC_MCLK_VALUE    mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                 arbValue;
+    union{
+        uint8_t             seqValue;
+        uint8_t             ACIndex;
+    };
+    uint8_t                 displayWatermark;
+    uint8_t                 gen2PCIE;
+    uint8_t                 gen2XSP;
+    uint8_t                 backbias;
+    uint8_t                 strobeMode;
+    uint8_t                 mcFlags;
+    uint32_t                aT;
+    uint32_t                bSP;
+    RV770_SMC_SCLK_VALUE    sclk;
+    RV7XX_SMC_MCLK_VALUE    mclk;
+    RV770_SMC_VOLTAGE_VALUE vddc;
+    RV770_SMC_VOLTAGE_VALUE mvdd;
+    RV770_SMC_VOLTAGE_VALUE vddci;
+    uint8_t                 reserved1;
+    uint8_t                 reserved2;
+    uint8_t                 stateFlags;
+    uint8_t                 padding;
+};
+
+#define SMC_STROBE_RATIO    0x0F
+#define SMC_STROBE_ENABLE   0x10
+
+#define SMC_MC_EDC_RD_FLAG  0x01
+#define SMC_MC_EDC_WR_FLAG  0x02
+#define SMC_MC_RTT_ENABLE   0x04
+#define SMC_MC_STUTTER_EN   0x08
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+    uint8_t           flags;
+    uint8_t           padding1;
+    uint8_t           padding2;
+    uint8_t           padding3;
+    RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX  4
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+#define MAX_NO_VREG_STEPS 32
+
+struct RV770_SMC_STATETABLE
+{
+    uint8_t             thermalProtectType;
+    uint8_t             systemFlags;
+    uint8_t             maxVDDCIndexInPPTable;
+    uint8_t             extraFlags;
+    uint8_t             highSMIO[MAX_NO_VREG_STEPS];
+    uint32_t            lowSMIO[MAX_NO_VREG_STEPS];
+    RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+    RV770_SMC_SWSTATE   initialState;
+    RV770_SMC_SWSTATE   ACPIState;
+    RV770_SMC_SWSTATE   driverState;
+    RV770_SMC_SWSTATE   ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
+
+#pragma pack(pop)
+
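+/*
+ * SMC "soft registers" are driver-visible scratch words in SMC SRAM.
+ * The block starts at RV770_SMC_SOFT_REGISTERS_START; the values below
+ * are byte offsets within it, written via rv770_write_smc_soft_register().
+ */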
+#define RV770_SMC_SOFT_REGISTERS_START        0x104
+
+#define RV770_SMC_SOFT_REGISTER_mclk_chg_timeout        0x0
+#define RV770_SMC_SOFT_REGISTER_baby_step_timer         0x8
+#define RV770_SMC_SOFT_REGISTER_delay_bbias             0xC
+#define RV770_SMC_SOFT_REGISTER_delay_vreg              0x10
+#define RV770_SMC_SOFT_REGISTER_delay_acpi              0x2C
+#define RV770_SMC_SOFT_REGISTER_seq_index               0x64
+#define RV770_SMC_SOFT_REGISTER_mvdd_chg_time           0x68
+#define RV770_SMC_SOFT_REGISTER_mclk_switch_lim         0x78
+#define RV770_SMC_SOFT_REGISTER_mc_block_delay          0x90
+#define RV770_SMC_SOFT_REGISTER_uvd_enabled             0x9C
+#define RV770_SMC_SOFT_REGISTER_is_asic_lombok          0xA0
+
+int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
+			    u16 smc_start_address, const u8 *src,
+			    u16 byte_count, u16 limit);
+void rv770_start_smc(struct radeon_device *rdev);
+void rv770_reset_smc(struct radeon_device *rdev);
+void rv770_stop_smc_clock(struct radeon_device *rdev);
+void rv770_start_smc_clock(struct radeon_device *rdev);
+bool rv770_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev);
+int rv770_read_smc_sram_dword(struct radeon_device *rdev,
+			      u16 smc_address, u32 *value, u16 limit);
+int rv770_write_smc_sram_dword(struct radeon_device *rdev,
+			       u16 smc_address, u32 value, u16 limit);
+int rv770_load_smc_ucode(struct radeon_device *rdev,
+			 u16 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
new file mode 100644
index 0000000..0271f4c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef RV770_H
+#define RV770_H
+
+#define R7XX_MAX_SH_GPRS           256
+#define R7XX_MAX_TEMP_GPRS         16
+#define R7XX_MAX_SH_THREADS        256
+#define R7XX_MAX_SH_STACK_ENTRIES  4096
+#define R7XX_MAX_BACKENDS          8
+#define R7XX_MAX_BACKENDS_MASK     0xff
+#define R7XX_MAX_SIMDS             16
+#define R7XX_MAX_SIMDS_MASK        0xffff
+#define R7XX_MAX_PIPES             8
+#define R7XX_MAX_PIPES_MASK        0xff
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL				0x718
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_REF_DIV(x)				((x) << 16)
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define CG_UPLL_FUNC_CNTL_2				0x71c
+#	define UPLL_SW_HILEN(x)				((x) << 0)
+#	define UPLL_SW_LOLEN(x)				((x) << 4)
+#	define UPLL_SW_HILEN2(x)			((x) << 8)
+#	define UPLL_SW_LOLEN2(x)			((x) << 12)
+#	define UPLL_SW_MASK				0x0000FFFF
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define CG_UPLL_FUNC_CNTL_3				0x720
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+
+/* pm registers */
+#define	SMC_SRAM_ADDR					0x200
+#define		SMC_SRAM_AUTO_INC_DIS				(1 << 16)
+#define	SMC_SRAM_DATA					0x204
+#define	SMC_IO						0x208
+#define		SMC_RST_N					(1 << 0)
+#define		SMC_STOP_MODE					(1 << 2)
+#define		SMC_CLK_EN					(1 << 11)
+#define	SMC_MSG						0x20c
+#define		HOST_SMC_MSG(x)					((x) << 0)
+#define		HOST_SMC_MSG_MASK				(0xff << 0)
+#define		HOST_SMC_MSG_SHIFT				0
+#define		HOST_SMC_RESP(x)				((x) << 8)
+#define		HOST_SMC_RESP_MASK				(0xff << 8)
+#define		HOST_SMC_RESP_SHIFT				8
+#define		SMC_HOST_MSG(x)					((x) << 16)
+#define		SMC_HOST_MSG_MASK				(0xff << 16)
+#define		SMC_HOST_MSG_SHIFT				16
+#define		SMC_HOST_RESP(x)				((x) << 24)
+#define		SMC_HOST_RESP_MASK				(0xff << 24)
+#define		SMC_HOST_RESP_SHIFT				24
+
+#define	SMC_ISR_FFD8_FFDB				0x218
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_DIVEN				(1 << 2)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_HILEN(x)				((x) << 12)
+#define		SPLL_HILEN_MASK				(0xf << 12)
+#define		SPLL_LOLEN(x)				((x) << 16)
+#define		SPLL_LOLEN_MASK				(0xf << 16)
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SCLK_MUX_UPDATE				(1 << 26)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_STATUS					0x60c
+#define		SPLL_CHG_STATUS				(1 << 1)
+
+#define	SPLL_CNTL_MODE					0x610
+#define		SPLL_DIV_SYNC				(1 << 5)
+
+#define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#       define RV730_MPLL_MCLK_SEL                      (1 << 25)
+
+#define	MPLL_AD_FUNC_CNTL				0x624
+#define		CLKF(x)					((x) << 0)
+#define		CLKF_MASK				(0x7f << 0)
+#define		CLKR(x)					((x) << 7)
+#define		CLKR_MASK				(0x1f << 7)
+#define		CLKFRAC(x)				((x) << 12)
+#define		CLKFRAC_MASK				(0x1f << 12)
+#define		YCLK_POST_DIV(x)			((x) << 17)
+#define		YCLK_POST_DIV_MASK			(3 << 17)
+#define		IBIAS(x)				((x) << 20)
+#define		IBIAS_MASK				(0x3ff << 20)
+#define		RESET					(1 << 30)
+#define		PDNB					(1 << 31)
+#define	MPLL_AD_FUNC_CNTL_2				0x628
+#define		BYPASS					(1 << 19)
+#define		BIAS_GEN_PDNB				(1 << 24)
+#define		RESET_EN				(1 << 25)
+#define		VCO_MODE				(1 << 29)
+#define	MPLL_DQ_FUNC_CNTL				0x62c
+#define	MPLL_DQ_FUNC_CNTL_2				0x630
+
+#define GENERAL_PWRMGT                                  0x63c
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define ENABLE_GEN2PCIE                          (1 << 4)
+#       define ENABLE_GEN2XSP                           (1 << 5)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (3 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define LOW_VOLT_D2_ACPI                         (1 << 8)
+#       define LOW_VOLT_D3_ACPI                         (1 << 9)
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define BACKBIAS_PAD_EN                          (1 << 18)
+#       define BACKBIAS_VALUE                           (1 << 19)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#       define AC_DC_SW                                 (1 << 24)
+
+#define CG_TPC                                            0x640
+#define SCLK_PWRMGT_CNTL                                  0x644
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#define	MCLK_PWRMGT_CNTL				0x648
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define MPLL_PWRMGT_OFF                          (1 << 5)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCKA0_SLEEP                            (1 << 8)
+#       define MRDCKA1_SLEEP                            (1 << 9)
+#       define MRDCKB0_SLEEP                            (1 << 10)
+#       define MRDCKB1_SLEEP                            (1 << 11)
+#       define MRDCKC0_SLEEP                            (1 << 12)
+#       define MRDCKC1_SLEEP                            (1 << 13)
+#       define MRDCKD0_SLEEP                            (1 << 14)
+#       define MRDCKD1_SLEEP                            (1 << 15)
+#       define MRDCKA0_RESET                            (1 << 16)
+#       define MRDCKA1_RESET                            (1 << 17)
+#       define MRDCKB0_RESET                            (1 << 18)
+#       define MRDCKB1_RESET                            (1 << 19)
+#       define MRDCKC0_RESET                            (1 << 20)
+#       define MRDCKC1_RESET                            (1 << 21)
+#       define MRDCKD0_RESET                            (1 << 22)
+#       define MRDCKD1_RESET                            (1 << 23)
+#       define DLL_READY_READ                           (1 << 24)
+#       define USE_DISPLAY_GAP                          (1 << 25)
+#       define USE_DISPLAY_URGENT_NORMAL                (1 << 26)
+#       define MPLL_TURNOFF_D2                          (1 << 28)
+#define	DLL_CNTL					0x64c
+#       define MRDCKA0_BYPASS                           (1 << 24)
+#       define MRDCKA1_BYPASS                           (1 << 25)
+#       define MRDCKB0_BYPASS                           (1 << 26)
+#       define MRDCKB1_BYPASS                           (1 << 27)
+#       define MRDCKC0_BYPASS                           (1 << 28)
+#       define MRDCKC1_BYPASS                           (1 << 29)
+#       define MRDCKD0_BYPASS                           (1 << 30)
+#       define MRDCKD1_BYPASS                           (1 << 31)
+
+#define MPLL_TIME                                         0x654
+#       define MPLL_LOCK_TIME(x)			((x) << 0)
+#       define MPLL_LOCK_TIME_MASK			(0xffff << 0)
+#       define MPLL_RESET_TIME(x)			((x) << 16)
+#       define MPLL_RESET_TIME_MASK			(0xffff << 16)
+
+#define CG_CLKPIN_CNTL                                    0x660
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+#       define XTALIN_DIVIDE                              (1 << 9)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x66c
+#       define CURRENT_PROFILE_INDEX_MASK                 (0xf << 4)
+#       define CURRENT_PROFILE_INDEX_SHIFT                4
+
+#define S0_VID_LOWER_SMIO_CNTL                            0x678
+#define S1_VID_LOWER_SMIO_CNTL                            0x67c
+#define S2_VID_LOWER_SMIO_CNTL                            0x680
+#define S3_VID_LOWER_SMIO_CNTL                            0x684
+
+#define CG_FTV                                            0x690
+#define CG_FFCT_0                                         0x694
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x6d0
+#       define BSP(x)					((x) << 0)
+#       define BSP_MASK					(0xffff << 0)
+#       define BSU(x)					((x) << 16)
+#       define BSU_MASK					(0xf << 16)
+#define CG_AT                                           0x6d4
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+#define CG_GIT                                          0x6d8
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x6e8
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x714
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x790
+#define		SSEN					(1 << 0)
+#define		CLKS(x)					((x) << 4)
+#define		CLKS_MASK				(0xfff << 4)
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	CG_MPLL_SPREAD_SPECTRUM				0x798
+#define CG_UPLL_SPREAD_SPECTRUM				0x79c
+#	define SSEN_MASK				0x00000001
+
+#define CG_CGTT_LOCAL_0                                   0x7d0
+#define CG_CGTT_LOCAL_1                                   0x7d4
+
+#define BIOS_SCRATCH_4                                    0x1734
+
+#define MC_SEQ_MISC0                                      0x2a00
+#define         MC_SEQ_MISC0_GDDR5_SHIFT                  28
+#define         MC_SEQ_MISC0_GDDR5_MASK                   0xf0000000
+#define         MC_SEQ_MISC0_GDDR5_VALUE                  5
+
+#define MC_ARB_SQM_RATIO                                  0x2770
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0xff << 0)
+#define		STATE1(x)				((x) << 8)
+#define		STATE1_MASK				(0xff << 8)
+#define		STATE2(x)				((x) << 16)
+#define		STATE2_MASK				(0xff << 16)
+#define		STATE3(x)				((x) << 24)
+#define		STATE3_MASK				(0xff << 24)
+
+#define	MC_ARB_RFSH_RATE				0x27b0
+#define		POWERMODE0(x)				((x) << 0)
+#define		POWERMODE0_MASK				(0xff << 0)
+#define		POWERMODE1(x)				((x) << 8)
+#define		POWERMODE1_MASK				(0xff << 8)
+#define		POWERMODE2(x)				((x) << 16)
+#define		POWERMODE2_MASK				(0xff << 16)
+#define		POWERMODE3(x)				((x) << 24)
+#define		POWERMODE3_MASK				(0xff << 24)
+
+#define CGTS_SM_CTRL_REG                                  0x9150
+
+/* Registers */
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG3					0x98B0
+#define		DB_CLK_OFF_DELAY(x)				((x) << 11)
+#define DB_DEBUG4					0x9B8C
+#define		DISABLE_TILE_COVERED_FOR_PS_ITER		(1 << 6)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define 	BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG                          0xef40
+#define UVD_UDEC_DB_TILING_CONFIG                       0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+#define UVD_NO_OP					0xeffc
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT			    8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		GUI_ACTIVE					(1<<31)
+#define	GRBM_STATUS2					0x8014
+
+#define	CG_THERMAL_CTRL					0x72C
+#define		DPM_EVENT_SRC(x)			((x) << 0)
+#define		DPM_EVENT_SRC_MASK			(7 << 0)
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+
+#define	CG_THERMAL_INT					0x734
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define		THERM_INT_MASK_HIGH			(1 << 24)
+#define		THERM_INT_MASK_LOW			(1 << 25)
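+/*
+ * Illustrative field composition only: thermal trip points are
+ * programmed with the macros above, along the lines of
+ *	tmp = RREG32(CG_THERMAL_INT) &
+ *	      ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
+ *	tmp |= DIG_THERM_INTH(high) | DIG_THERM_INTL(low);
+ *	WREG32(CG_THERMAL_INT, tmp);
+ */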
+
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x3FF0000
+#define		ASIC_T_SHIFT			        16
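+/*
+ * e.g. the current temperature field is read back as
+ *	(RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT
+ */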
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
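+/*
+ * These fields are extracted with their mask/shift pairs, e.g. the
+ * bank count field:
+ *	(RREG32(MC_ARB_RAMCFG) & NOOFBANK_MASK) >> NOOFBANK_SHIFT
+ */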
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
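+/*
+ * Illustrative composition for the MB/MD L1 TLB control registers
+ * (example values, not necessarily the driver's exact settings):
+ *	WREG32(MC_VM_MB_L1_TLB0_CNTL,
+ *	       ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ *	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ *	       EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5));
+ */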
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x)<<0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x)<<16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		CACHE_DEPTH(x)					((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		DX9_CONSTS					(1 << 2)
+#define		ALU_INST_PREFER_VECTOR				(1 << 3)
+#define		DX10_CLAMP					(1 << 4)
+#define		CLAUSE_SEQ_PRIO(x)				((x) << 8)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_0			0x8DB0
+#define		SIMDA_RING0(x)					((x)<<0)
+#define		SIMDA_RING1(x)					((x)<<8)
+#define		SIMDB_RING0(x)					((x)<<16)
+#define		SIMDB_RING1(x)					((x)<<24)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_1			0x8DB4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_2			0x8DB8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_3			0x8DBC
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_4			0x8DC0
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_5			0x8DC4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_6			0x8DC8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_7			0x8DCC
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		DYN_GPR_ENABLE					(1 << 27)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C10
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C14
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C0C
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MISC						0x28350
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1 << 31)
+
+#define	TCP_CNTL					0x9610
+#define	TCP_CHAN_STEER					0x9614
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
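+/* e.g. CACHE_INVALIDATION(VC_AND_TC) | AUTO_INVLD_EN(ES_AND_GS_AUTO) */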
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
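+/*
+ * e.g. a single-level page table context is typically enabled with
+ *	ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ *	RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
+ */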
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
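+/*
+ * Example encoding (illustrative): DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)
+ * builds the header for a one-dword write; command in bits 31:28,
+ * t in bit 23, s in bit 22, dword count in bits 15:0.
+ */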
+
+
+#define	SRBM_STATUS				        0x0E50
+
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL                         0x7400
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#define HDMI_STATUS                          0x7404
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7408
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x740c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL              0x7410
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7414
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7418
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x741c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7428
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x742c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7454
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7458
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x745c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7460
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7464
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7468
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x746c
+#define AFMT_GENERIC0_0                      0x7470
+#define AFMT_GENERIC0_1                      0x7474
+#define AFMT_GENERIC0_2                      0x7478
+#define AFMT_GENERIC0_3                      0x747c
+#define AFMT_GENERIC0_4                      0x7480
+#define AFMT_GENERIC0_5                      0x7484
+#define AFMT_GENERIC0_6                      0x7488
+#define AFMT_GENERIC1_HDR                    0x748c
+#define AFMT_GENERIC1_0                      0x7490
+#define AFMT_GENERIC1_1                      0x7494
+#define AFMT_GENERIC1_2                      0x7498
+#define AFMT_GENERIC1_3                      0x749c
+#define AFMT_GENERIC1_4                      0x74a0
+#define AFMT_GENERIC1_5                      0x74a4
+#define AFMT_GENERIC1_6                      0x74a8
+#define HDMI_ACR_32_0                        0x74ac
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x74b0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x74b4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x74b8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x74bc
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x74c0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0                    0x74c4
+#define HDMI_ACR_STATUS_1                    0x74c8
+#define AFMT_AUDIO_INFO0                     0x74cc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1                     0x74d0
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0                         0x74d4
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x74d8
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x74dc
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x74e0
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1 << 31)
+#define AFMT_RAMP_CONTROL1                   0x74e4
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x74e8
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x74ec
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x74f0
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7608
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x760c
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0                      (0x7400 - 0x7400)
+#define HDMI_OFFSET1                      (0x7800 - 0x7400)
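+/* e.g. HDMI_STATUS + HDMI_OFFSET1 addresses the second instance */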
+
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DST */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
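+/*
+ * e.g. an 8-channel LPCM descriptor advertising 32/44.1/48 kHz:
+ *	MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07) |
+ *	SUPPORTED_FREQUENCIES_STEREO(0x07)
+ */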
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x611c
+
+/* PCIE indirect regs */
+#define PCIE_P_CNTL                                       0x40
+#       define P_PLL_PWRDN_IN_L1L23                       (1 << 3)
+#       define P_PLL_BUF_PDNB                             (1 << 4)
+#       define P_PLL_PDNB                                 (1 << 9)
+#       define P_ALLOW_PRX_FRONTEND_SHUTOFF               (1 << 12)
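+/* accessed through the indirect port, e.g. RREG32_PCIE(PCIE_P_CNTL) */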
+/* PCIE PORT regs */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     8
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
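+/*
+ * Example encodings (illustrative): PACKET0(WAIT_UNTIL, 0) heads a
+ * single register write to WAIT_UNTIL (register dword offset in bits
+ * 15:0, count field in bits 29:16); PACKET3 carries its opcode in
+ * bits 15:8 with the count field in bits 29:16.
+ */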
+
+/* UVD */
+#define UVD_SEMA_ADDR_LOW				0xef00
+#define UVD_SEMA_ADDR_HIGH				0xef04
+#define UVD_SEMA_CMD					0xef08
+#define UVD_GPCOM_VCPU_CMD				0xef0c
+#define UVD_GPCOM_VCPU_DATA0				0xef10
+#define UVD_GPCOM_VCPU_DATA1				0xef14
+
+#define UVD_LMI_EXT40_ADDR				0xf498
+#define UVD_VCPU_CHIP_ID				0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0				0xf4d8
+#define UVD_VCPU_CACHE_SIZE0				0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1				0xf4e0
+#define UVD_VCPU_CACHE_SIZE1				0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2				0xf4e8
+#define UVD_VCPU_CACHE_SIZE2				0xf4ec
+#define UVD_LMI_ADDR_EXT				0xf594
+
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+
+#define UVD_CONTEXT_ID					0xf6f4
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
new file mode 100644
index 0000000..85c604d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si.c
@@ -0,0 +1,7568 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include <drm/radeon_drm.h>
+#include "sid.h"
+#include "atom.h"
+#include "si_blit_shaders.h"
+#include "clearstate_si.h"
+#include "radeon_ucode.h"
+
+MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
+MODULE_FIRMWARE("radeon/TAHITI_me.bin");
+MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
+MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
+MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+
+MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
+MODULE_FIRMWARE("radeon/VERDE_me.bin");
+MODULE_FIRMWARE("radeon/VERDE_ce.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
+MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
+
+static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
+static void si_pcie_gen3_enable(struct radeon_device *rdev);
+static void si_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					 bool enable);
+static void si_init_pg(struct radeon_device *rdev);
+static void si_init_cg(struct radeon_device *rdev);
+static void si_fini_pg(struct radeon_device *rdev);
+static void si_fini_cg(struct radeon_device *rdev);
+static void si_rlc_stop(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
+static const u32 si_disp_int_status[] =
+{
+	DISP_INTERRUPT_STATUS,
+	DISP_INTERRUPT_STATUS_CONTINUE,
+	DISP_INTERRUPT_STATUS_CONTINUE2,
+	DISP_INTERRUPT_STATUS_CONTINUE3,
+	DISP_INTERRUPT_STATUS_CONTINUE4,
+	DISP_INTERRUPT_STATUS_CONTINUE5
+};
+
+#define DC_HPDx_CONTROL(x)        (DC_HPD1_CONTROL     + ((x) * 0xc))
+#define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + ((x) * 0xc))
+#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + ((x) * 0xc))
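+/* x is zero-based: DC_HPDx_CONTROL(0) == DC_HPD1_CONTROL; pads are 0xc apart */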
+
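+/*
+ * List format (schematic): each entry below pairs a selector in the
+ * upper 16 bits with a register dword offset in the lower 16 bits,
+ * and is followed by one data slot used by the RLC on restore.
+ */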
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+	(0x8000 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x98f0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xe7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9150 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x897c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8d8c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac54 >> 2),
+	0x00000000,
+	0x3,
+	(0x9c00 << 16) | (0x98f8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9910 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9914 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9918 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x991c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9920 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9924 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9928 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x992c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9930 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9934 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9938 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x993c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9940 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9944 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9948 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x994c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9950 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9954 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9958 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x995c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9960 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9964 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9968 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x996c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9970 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9974 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9978 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x997c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9980 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9984 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9988 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x998c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c04 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c08 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9060 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9364 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9100 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x913c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8bcc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b24 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e50 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e58 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e5c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9508 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x950c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9494 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xae00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac08 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88d4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88cc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x89b0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8a14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9830 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9834 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9838 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9a10 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9874 >> 2),
+	0x00000000,
+	0x00000000
+};
+
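+/*
+ * The golden-register tables below are (offset, and_mask, or_value)
+ * triplets, applied roughly as (schematic sketch of the register
+ * sequence programming helper; a full 0xffffffff mask simply writes
+ * the value):
+ *	tmp = RREG32(reg);
+ *	tmp &= ~and_mask;
+ *	tmp |= or_value;
+ *	WREG32(reg, tmp);
+ */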
+static const u32 tahiti_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601005,
+	0xc47c, 0xffffffff, 0x10104040,
+	0xc488, 0xffffffff, 0x0100000a,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000f4,
+	0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x277c, 0x00000003, 0x000007ff,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x2a00126a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x00000200, 0x000002fb,
+	0xac10, 0xffffffff, 0x0000543b,
+	0xac0c, 0xffffffff, 0xa9210876,
+	0x88d0, 0xffffffff, 0x000fff40,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x1410, 0x20000000, 0x20fffed8,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+	0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601004,
+	0xc47c, 0xffffffff, 0x10102020,
+	0xc488, 0xffffffff, 0x01000020,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x2a00126a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f7,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x32761054,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x033f1005,
+	0xc47c, 0xffffffff, 0x10808020,
+	0xc488, 0xffffffff, 0x00800008,
+	0xc314, 0xffffffff, 0x00001000,
+	0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x240c, 0x000007ff, 0x00000000,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00001032,
+	0xac0c, 0xffffffff, 0x00001032,
+	0xac0c, 0xffffffff, 0x00001032,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601005,
+	0xc47c, 0xffffffff, 0x10104040,
+	0xc488, 0xffffffff, 0x0100000a,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x00000082,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f3,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00003210,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd8c0, 0xff000fff, 0x00000100,
+	0xd830, 0x000300c0, 0x00800040,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x00000000,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x03e00000, 0x03600000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f1,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00003210,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+	0x98f8, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x91d8, 0xffffffff, 0x00070006,
+	0x91dc, 0xffffffff, 0x00090008,
+	0x91e0, 0xffffffff, 0x0000000c,
+	0x91e4, 0xffffffff, 0x000b000a,
+	0x91e8, 0xffffffff, 0x000e000d,
+	0x91ec, 0xffffffff, 0x00080007,
+	0x91f0, 0xffffffff, 0x000a0009,
+	0x91f4, 0xffffffff, 0x0000000d,
+	0x91f8, 0xffffffff, 0x000c000b,
+	0x91fc, 0xffffffff, 0x000f000e,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9264, 0xffffffff, 0x000e000d,
+	0x9268, 0xffffffff, 0x0010000f,
+	0x926c, 0xffffffff, 0x00000013,
+	0x9270, 0xffffffff, 0x00120011,
+	0x9274, 0xffffffff, 0x00150014,
+	0x9278, 0xffffffff, 0x000f000e,
+	0x927c, 0xffffffff, 0x00110010,
+	0x9280, 0xffffffff, 0x00000014,
+	0x9284, 0xffffffff, 0x00130012,
+	0x9288, 0xffffffff, 0x00160015,
+	0x928c, 0xffffffff, 0x0010000f,
+	0x9290, 0xffffffff, 0x00120011,
+	0x9294, 0xffffffff, 0x00000015,
+	0x9298, 0xffffffff, 0x00140013,
+	0x929c, 0xffffffff, 0x00170016,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
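+/* PG init sequence for Verde.  Like the mgcg/cgcg tables above, this is a
+ * flat array of {offset, and-mask, or-value} triplets consumed three at a
+ * time by radeon_program_register_sequence().
+ */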
+static const u32 verde_pg_init[] =
+{
+	0x353c, 0xffffffff, 0x40000,
+	0x3538, 0xffffffff, 0x200010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x7007,
+	0x3538, 0xffffffff, 0x300010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x400000,
+	0x3538, 0xffffffff, 0x100010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x120200,
+	0x3538, 0xffffffff, 0x500010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x1e1e16,
+	0x3538, 0xffffffff, 0x600010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x171f1e,
+	0x3538, 0xffffffff, 0x700010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x3538, 0xffffffff, 0x9ff,
+	0x3500, 0xffffffff, 0x0,
+	0x3504, 0xffffffff, 0x10000800,
+	0x3504, 0xffffffff, 0xf,
+	0x3504, 0xffffffff, 0xf,
+	0x3500, 0xffffffff, 0x4,
+	0x3504, 0xffffffff, 0x1000051e,
+	0x3504, 0xffffffff, 0xffff,
+	0x3504, 0xffffffff, 0xffff,
+	0x3500, 0xffffffff, 0x8,
+	0x3504, 0xffffffff, 0x80500,
+	0x3500, 0xffffffff, 0x12,
+	0x3504, 0xffffffff, 0x9050c,
+	0x3500, 0xffffffff, 0x1d,
+	0x3504, 0xffffffff, 0xb052c,
+	0x3500, 0xffffffff, 0x2a,
+	0x3504, 0xffffffff, 0x1053e,
+	0x3500, 0xffffffff, 0x2d,
+	0x3504, 0xffffffff, 0x10546,
+	0x3500, 0xffffffff, 0x30,
+	0x3504, 0xffffffff, 0xa054e,
+	0x3500, 0xffffffff, 0x3c,
+	0x3504, 0xffffffff, 0x1055f,
+	0x3500, 0xffffffff, 0x3f,
+	0x3504, 0xffffffff, 0x10567,
+	0x3500, 0xffffffff, 0x42,
+	0x3504, 0xffffffff, 0x1056f,
+	0x3500, 0xffffffff, 0x45,
+	0x3504, 0xffffffff, 0x10572,
+	0x3500, 0xffffffff, 0x48,
+	0x3504, 0xffffffff, 0x20575,
+	0x3500, 0xffffffff, 0x4c,
+	0x3504, 0xffffffff, 0x190801,
+	0x3500, 0xffffffff, 0x67,
+	0x3504, 0xffffffff, 0x1082a,
+	0x3500, 0xffffffff, 0x6a,
+	0x3504, 0xffffffff, 0x1b082d,
+	0x3500, 0xffffffff, 0x87,
+	0x3504, 0xffffffff, 0x310851,
+	0x3500, 0xffffffff, 0xba,
+	0x3504, 0xffffffff, 0x891,
+	0x3500, 0xffffffff, 0xbc,
+	0x3504, 0xffffffff, 0x893,
+	0x3500, 0xffffffff, 0xbe,
+	0x3504, 0xffffffff, 0x20895,
+	0x3500, 0xffffffff, 0xc2,
+	0x3504, 0xffffffff, 0x20899,
+	0x3500, 0xffffffff, 0xc6,
+	0x3504, 0xffffffff, 0x2089d,
+	0x3500, 0xffffffff, 0xca,
+	0x3504, 0xffffffff, 0x8a1,
+	0x3500, 0xffffffff, 0xcc,
+	0x3504, 0xffffffff, 0x8a3,
+	0x3500, 0xffffffff, 0xce,
+	0x3504, 0xffffffff, 0x308a5,
+	0x3500, 0xffffffff, 0xd3,
+	0x3504, 0xffffffff, 0x6d08cd,
+	0x3500, 0xffffffff, 0x142,
+	0x3504, 0xffffffff, 0x2000095a,
+	0x3504, 0xffffffff, 0x1,
+	0x3500, 0xffffffff, 0x144,
+	0x3504, 0xffffffff, 0x301f095b,
+	0x3500, 0xffffffff, 0x165,
+	0x3504, 0xffffffff, 0xc094d,
+	0x3500, 0xffffffff, 0x173,
+	0x3504, 0xffffffff, 0xf096d,
+	0x3500, 0xffffffff, 0x184,
+	0x3504, 0xffffffff, 0x15097f,
+	0x3500, 0xffffffff, 0x19b,
+	0x3504, 0xffffffff, 0xc0998,
+	0x3500, 0xffffffff, 0x1a9,
+	0x3504, 0xffffffff, 0x409a7,
+	0x3500, 0xffffffff, 0x1af,
+	0x3504, 0xffffffff, 0xcdc,
+	0x3500, 0xffffffff, 0x1b1,
+	0x3504, 0xffffffff, 0x800,
+	0x3508, 0xffffffff, 0x6c9b2000,
+	0x3510, 0xfc00, 0x2000,
+	0x3544, 0xffffffff, 0xfc0,
+	0x28d4, 0x00000100, 0x100
+};
+
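+/* Apply the chip-specific "golden" register settings for the detected
+ * SI variant using the tables defined above.
+ */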
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 tahiti_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_registers2,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+		break;
+	case CHIP_PITCAIRN:
+		radeon_program_register_sequence(rdev,
+						 pitcairn_golden_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 pitcairn_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 pitcairn_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
+	case CHIP_VERDE:
+		radeon_program_register_sequence(rdev,
+						 verde_golden_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 verde_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 verde_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 verde_pg_init,
+						 (const u32)ARRAY_SIZE(verde_pg_init));
+		break;
+	case CHIP_OLAND:
+		radeon_program_register_sequence(rdev,
+						 oland_golden_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 oland_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 oland_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
+	case CHIP_HAINAN:
+		radeon_program_register_sequence(rdev,
+						 hainan_golden_registers,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 hainan_golden_registers2,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 hainan_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * si_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ */
+int si_get_allowed_info_register(struct radeon_device *rdev,
+				 u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS2:
+	case GRBM_STATUS_SE0:
+	case GRBM_STATUS_SE1:
+	case SRBM_STATUS:
+	case SRBM_STATUS2:
+	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 tmp;
+
+	tmp = RREG32(CG_CLKPIN_CNTL_2);
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	tmp = RREG32(CG_CLKPIN_CNTL);
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+/* get temperature in millidegrees */
+int si_get_temp(struct radeon_device *rdev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
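+	/* temp is a 10-bit field: clamp to 255C when bit 9 is set,
+	 * otherwise use the low 9 bits directly
+	 */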
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp *= 1000;
+
+	return actual_temp;
+}
+
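+/* MC_SEQ_IO_DEBUG {index, data} pairs written before loading the legacy
+ * (non-unified-header) MC firmware; every SI variant uses a table of the
+ * same length, hence the shared TAHITI_IO_MC_REGS_SIZE define.
+ */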
+#define TAHITI_IO_MC_REGS_SIZE 36
+
+static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a77400}
+};
+
+static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a47400}
+};
+
+static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a37400}
+};
+
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a17730}
+};
+
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a07730}
+};
+
+/* ucode loading */
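+/**
+ * si_mc_load_microcode - load MC ucode into the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Load the GDDR MC ucode into the hw (SI).  Skips the load if the MC
+ * is already running.  Returns 0 on success, -EINVAL if no MC firmware
+ * has been fetched.
+ */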
+int si_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data = NULL;
+	const __le32 *new_fw_data = NULL;
+	u32 running;
+	u32 *io_mc_regs = NULL;
+	const __le32 *new_io_mc_regs = NULL;
+	int i, regs_size, ucode_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	if (rdev->new_fw) {
+		const struct mc_firmware_header_v1_0 *hdr =
+			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+		radeon_ucode_print_mc_hdr(&hdr->header);
+		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+		new_io_mc_regs = (const __le32 *)
+			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		new_fw_data = (const __le32 *)
+			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		ucode_size = rdev->mc_fw->size / 4;
+
+		switch (rdev->family) {
+		case CHIP_TAHITI:
+			io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+			regs_size = TAHITI_IO_MC_REGS_SIZE;
+			break;
+		case CHIP_PITCAIRN:
+			io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+			regs_size = TAHITI_IO_MC_REGS_SIZE;
+			break;
+		case CHIP_VERDE:
+		default:
+			io_mc_regs = (u32 *)&verde_io_mc_regs;
+			regs_size = TAHITI_IO_MC_REGS_SIZE;
+			break;
+		case CHIP_OLAND:
+			io_mc_regs = (u32 *)&oland_io_mc_regs;
+			regs_size = TAHITI_IO_MC_REGS_SIZE;
+			break;
+		case CHIP_HAINAN:
+			io_mc_regs = (u32 *)&hainan_io_mc_regs;
+			regs_size = TAHITI_IO_MC_REGS_SIZE;
+			break;
+		}
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+	}
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			if (rdev->new_fw) {
+				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+			} else {
+				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+			}
+		}
+		/* load the MC ucode */
+		for (i = 0; i < ucode_size; i++) {
+			if (rdev->new_fw)
+				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+			else
+				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+		}
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+	}
+
+	return 0;
+}
+
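+/* Fetch every firmware image an SI part needs: PFP/ME/CE (CP), RLC, MC
+ * and, optionally, SMC.  New-style images with a unified header are tried
+ * first; on failure we fall back to the legacy blobs and size-check them
+ * against the per-chip sizes selected below.
+ */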
+static int si_init_microcode(struct radeon_device *rdev)
+{
+	const char *chip_name;
+	const char *new_chip_name;
+	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
+	size_t smc_req_size, mc2_req_size;
+	char fw_name[30];
+	int err;
+	int new_fw = 0;
+	bool new_smc = false;
+	bool si58_fw = false;
+	bool banks2_fw = false;
+
+	DRM_DEBUG("\n");
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		chip_name = "TAHITI";
+		new_chip_name = "tahiti";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "PITCAIRN";
+		if ((rdev->pdev->revision == 0x81) &&
+		    ((rdev->pdev->device == 0x6810) ||
+		     (rdev->pdev->device == 0x6811)))
+			new_smc = true;
+		new_chip_name = "pitcairn";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_VERDE:
+		chip_name = "VERDE";
+		if (((rdev->pdev->device == 0x6820) &&
+		     ((rdev->pdev->revision == 0x81) ||
+		      (rdev->pdev->revision == 0x83))) ||
+		    ((rdev->pdev->device == 0x6821) &&
+		     ((rdev->pdev->revision == 0x83) ||
+		      (rdev->pdev->revision == 0x87))) ||
+		    ((rdev->pdev->revision == 0x87) &&
+		     ((rdev->pdev->device == 0x6823) ||
+		      (rdev->pdev->device == 0x682b))))
+			new_smc = true;
+		new_chip_name = "verde";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_OLAND:
+		chip_name = "OLAND";
+		if (((rdev->pdev->revision == 0x81) &&
+		     ((rdev->pdev->device == 0x6600) ||
+		      (rdev->pdev->device == 0x6604) ||
+		      (rdev->pdev->device == 0x6605) ||
+		      (rdev->pdev->device == 0x6610))) ||
+		    ((rdev->pdev->revision == 0x83) &&
+		     (rdev->pdev->device == 0x6610)))
+			new_smc = true;
+		new_chip_name = "oland";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
+		break;
+	case CHIP_HAINAN:
+		chip_name = "HAINAN";
+		if (((rdev->pdev->revision == 0x81) &&
+		     (rdev->pdev->device == 0x6660)) ||
+		    ((rdev->pdev->revision == 0x83) &&
+		     ((rdev->pdev->device == 0x6660) ||
+		      (rdev->pdev->device == 0x6663) ||
+		      (rdev->pdev->device == 0x6665) ||
+		      (rdev->pdev->device == 0x6667))))
+			new_smc = true;
+		else if ((rdev->pdev->revision == 0xc3) &&
+			 (rdev->pdev->device == 0x6665))
+			banks2_fw = true;
+		new_chip_name = "hainan";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
+		break;
+	default:
+		BUG();
+	}
+
+	/* this memory configuration requires special firmware */
+	if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+		si58_fw = true;
+
+	DRM_INFO("Loading %s Microcode\n", new_chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->pfp_fw->size != pfp_req_size) {
+			pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->pfp_fw->size, fw_name);
+			err = -EINVAL;
+			goto out;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->pfp_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+		err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->me_fw->size != me_req_size) {
+			pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->me_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->me_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
+	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+		err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->ce_fw->size != ce_req_size) {
+			pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->ce_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->ce_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+		err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+		if (err)
+			goto out;
+		if (rdev->rlc_fw->size != rlc_req_size) {
+			pr_err("si_rlc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->rlc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->rlc_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	if (si58_fw)
+		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+	else
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+	err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+		if (err) {
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+			err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+			if (err)
+				goto out;
+		}
+		if ((rdev->mc_fw->size != mc_req_size) &&
+		    (rdev->mc_fw->size != mc2_req_size)) {
+			pr_err("si_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+		DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+	} else {
+		err = radeon_ucode_validate(rdev->mc_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	if (banks2_fw)
+		snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+	else if (new_smc)
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+	else
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+	if (err) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+		if (err) {
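+			/* SMC firmware is optional (only used for DPM),
+			 * so carry on without it rather than failing
+			 */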
+			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
+			release_firmware(rdev->smc_fw);
+			rdev->smc_fw = NULL;
+			err = 0;
+		} else if (rdev->smc_fw->size != smc_req_size) {
+			pr_err("si_smc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->smc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	} else {
+		err = radeon_ucode_validate(rdev->smc_fw);
+		if (err) {
+			pr_err("si_cp: validation failed for firmware \"%s\"\n",
+			       fw_name);
+			goto out;
+		} else {
+			new_fw++;
+		}
+	}
+
+	if (new_fw == 0) {
+		rdev->new_fw = false;
+	} else if (new_fw < 6) {
+		pr_err("si_fw: mixing new and old firmware!\n");
+		err = -EINVAL;
+	} else {
+		rdev->new_fw = true;
+	}
+out:
+	if (err) {
+		if (err != -EINVAL)
+			pr_err("si_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->ce_fw);
+		rdev->ce_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+		release_firmware(rdev->smc_fw);
+		rdev->smc_fw = NULL;
+	}
+	return err;
+}
+
+/* watermark setup */
+static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
+				   struct radeon_crtc *radeon_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20 (only the two used
+	 * here are listed):
+	 *  0 - half lb
+	 *  2 - whole lb, other crtc must be disabled
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
+	       DC_LB_MEMORY_CONFIG(tmp));
+
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
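+	/* report the usable line buffer size (in pixels) for the
+	 * watermark calculation below
+	 */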
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 2:
+			return 8192 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
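+/* Decode the NOOFCHAN field of MC_SHARED_CHMAP into the number of DRAM
+ * channels, used below when estimating available memory bandwidth.
+ */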
+static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce6_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
+{
+	return 32;
+}
+
+static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, sclk, bandwidth;
+	fixed20_12 a, b1, b2;
+	u32 min_bandwidth;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
+	b1.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
+	b2.full = dfixed_mul(a, sclk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
+
+	a.full = dfixed_const(min_bandwidth);
+	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
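+/* Worst-case latency (in ns) that the line buffer must hide: MC latency
+ * plus DC pipe latency plus the data-return time of the other heads,
+ * extended when the line buffer cannot be refilled within the active
+ * display time.
+ */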
+static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce6_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
+
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+}
+
+static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (dce6_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
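+/* Compute the high-clock (wm A) and low-clock (wm B) watermarks for a
+ * crtc and program them, forcing display priority high whenever the
+ * average-bandwidth or latency-hiding checks fail.
+ */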
+static void dce6_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct dce6_wm_params wm_low, wm_high;
+	u32 dram_channels;
+	u32 active_time;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		if (rdev->family == CHIP_ARUBA)
+			dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		else
+			dram_channels = si_get_number_of_dram_channels(rdev);
+
+		/* watermark for high clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			wm_high.yclk =
+				radeon_dpm_get_mclk(rdev, false) * 10;
+			wm_high.sclk =
+				radeon_dpm_get_sclk(rdev, false) * 10;
+		} else {
+			wm_high.yclk = rdev->pm.current_mclk * 10;
+			wm_high.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_high.disp_clk = mode->clock;
+		wm_high.src_width = mode->crtc_hdisplay;
+		wm_high.active_time = active_time;
+		wm_high.blank_time = line_time - wm_high.active_time;
+		wm_high.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_high.interlaced = true;
+		wm_high.vsc = radeon_crtc->vsc;
+		wm_high.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_high.vtaps = 2;
+		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_high.lb_size = lb_size;
+		wm_high.dram_channels = dram_channels;
+		wm_high.num_heads = num_heads;
+
+		/* watermark for low clocks */
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			wm_low.yclk =
+				radeon_dpm_get_mclk(rdev, true) * 10;
+			wm_low.sclk =
+				radeon_dpm_get_sclk(rdev, true) * 10;
+		} else {
+			wm_low.yclk = rdev->pm.current_mclk * 10;
+			wm_low.sclk = rdev->pm.current_sclk * 10;
+		}
+
+		wm_low.disp_clk = mode->clock;
+		wm_low.src_width = mode->crtc_hdisplay;
+		wm_low.active_time = active_time;
+		wm_low.blank_time = line_time - wm_low.active_time;
+		wm_low.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_low.interlaced = true;
+		wm_low.vsc = radeon_crtc->vsc;
+		wm_low.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm_low.vtaps = 2;
+		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_low.lb_size = lb_size;
+		wm_low.dram_channels = dram_channels;
+		wm_low.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
+		/* set for low clocks */
+		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+		    !dce6_check_latency_hiding(&wm_high) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+		    !dce6_check_latency_hiding(&wm_low) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+	/* save values for DPM */
+	radeon_crtc->line_time = line_time;
+	radeon_crtc->wm_high = latency_watermark_a;
+	radeon_crtc->wm_low = latency_watermark_b;
+}
+
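+/**
+ * dce6_bandwidth_update - program display watermarks
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (SI).
+ */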
+void dce6_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/*
+ * Core functions
+ */
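+/* Fill rdev->config.si.tile_mode_array with the per-family tiling table
+ * and mirror each entry into the GB_TILE_MODE registers.  Depth-surface
+ * tile splits scale with the DRAM row size (mem_row_size_in_kb).
+ */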
+static void si_tiling_mode_table_init(struct radeon_device *rdev)
+{
+	u32 *tile = rdev->config.si.tile_mode_array;
+	const u32 num_tile_mode_states =
+			ARRAY_SIZE(rdev->config.si.tile_mode_array);
+	u32 reg_offset, split_equal_to_row_size;
+
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+		tile[reg_offset] = 0;
+
+	switch(rdev->family) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		/* non-AA compressed depth or any compressed stencil */
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 2xAA/4xAA compressed depth only */
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 8xAA compressed depth only */
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 1D and 1D Array Surfaces */
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Displayable maps. */
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 8bpp. */
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 16bpp. */
+		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 32bpp. */
+		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin. */
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 8 bpp. */
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 16 bpp. */
+		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 32 bpp. */
+		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 64 bpp. */
+		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* 8 bpp PRT. */
+		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 16 bpp PRT */
+		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 32 bpp PRT */
+		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 64 bpp PRT */
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 128 bpp PRT */
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+			   NUM_BANKS(ADDR_SURF_8_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		break;
+
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		/* non-AA compressed depth or any compressed stencil */
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 2xAA/4xAA compressed depth only */
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 8xAA compressed depth only */
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 1D and 1D Array Surfaces */
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Displayable maps. */
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 8bpp. */
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* Display 16bpp. */
+		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 32bpp. */
+		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin. */
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 8 bpp. */
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 16 bpp. */
+		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 32 bpp. */
+		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 64 bpp. */
+		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 8 bpp PRT. */
+		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 16 bpp PRT */
+		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 32 bpp PRT */
+		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 64 bpp PRT */
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 128 bpp PRT */
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+			   NUM_BANKS(ADDR_SURF_8_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		break;
+
+	default:
+		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+	}
+}
+
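+/* Select which SE/SH pair subsequent indexed register accesses target;
+ * passing 0xffffffff for se_num and/or sh_num broadcasts the write to
+ * every instance of that unit.
+ */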
+static void si_select_se_sh(struct radeon_device *rdev,
+			    u32 se_num, u32 sh_num)
+{
+	u32 data = INSTANCE_BROADCAST_WRITES;
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
+
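+/* Build a right-aligned mask of bit_width set bits, equivalent to
+ * (1 << bit_width) - 1; e.g. si_create_bitmask(4) == 0xf.
+ */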
+static u32 si_create_bitmask(u32 bit_width)
+{
+	u32 i, mask = 0;
+
+	for (i = 0; i < bit_width; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+	return mask;
+}
+
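+/* Return a bitmap of the active CUs in the currently selected shader
+ * array, merging the CC and GC_USER shader array config registers and
+ * inverting the inactive-CU field against a cu_per_sh wide mask.
+ */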
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	if (data & 1)
+		data &= INACTIVE_CUS_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = si_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
+
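+/* For each SE/SH, clear the enable bit of the lowest active CU in
+ * SPI_STATIC_THREAD_MGMT_3, then restore broadcast mode.
+ */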
+static void si_setup_spi(struct radeon_device *rdev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask, active_cu;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;	/* test each CU bit in turn */
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
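+/* Return the per-SH bitmap of disabled render backends, merging the
+ * CC and GC_USER backend disable registers.
+ */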
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+			      u32 max_rb_num_per_se,
+			      u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	if (data & 1)
+		data &= BACKEND_DISABLE_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+	return data & mask;
+}
+
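+/* Collect the disabled-RB bits from every SE/SH, store the resulting
+ * enable mask in the config, and program one PA_SC_RASTER_CONFIG RB
+ * mapping per SE (two bits per shader array).
+ */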
+static void si_setup_rb(struct radeon_device *rdev,
+			u32 se_num, u32 sh_per_se,
+			u32 max_rb_num_per_se)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	rdev->config.si.backend_enable_mask = enabled_rbs;
+
+	for (i = 0; i < se_num; i++) {
+		si_select_se_sh(rdev, i, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
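+/* One-time gfx setup: per-ASIC limits, HDP init, tiling/address
+ * configuration, RB/SPI setup and 3D engine register defaults.
+ */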
+static void si_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 12;
+		rdev->config.si.max_cu_per_sh = 8;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 12;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PITCAIRN:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 8;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 8;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_VERDE:
+	default:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 4;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_OLAND:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 6;
+		rdev->config.si.max_sh_per_se = 1;
+		rdev->config.si.max_backends_per_se = 2;
+		rdev->config.si.max_texture_channel_caches = 4;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 16;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAINAN:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 1;
+		rdev->config.si.max_backends_per_se = 1;
+		rdev->config.si.max_texture_channel_caches = 2;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 16;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
+	rdev->config.si.mem_max_burst_length_bytes = 256;
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.si.mem_row_size_in_kb > 4)
+		rdev->config.si.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.si.shader_engine_tile_size = 32;
+	rdev->config.si.num_gpus = 1;
+	rdev->config.si.multi_gpu_tile_size = 64;
+
+	/* fix up row size */
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.si.tile_config = 0;
+	switch (rdev->config.si.num_tile_pipes) {
+	case 1:
+		rdev->config.si.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.si.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.si.tile_config |= (2 << 0);
+		break;
+	case 8:
+	default:
+		/* XXX what about 12? */
+		rdev->config.si.tile_config |= (3 << 0);
+		break;
+	}
+	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+	case 0: /* four banks */
+		rdev->config.si.tile_config |= 0 << 4;
+		break;
+	case 1: /* eight banks */
+		rdev->config.si.tile_config |= 1 << 4;
+		break;
+	case 2: /* sixteen banks */
+	default:
+		rdev->config.si.tile_config |= 2 << 4;
+		break;
+	}
+	rdev->config.si.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.si.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+	if (rdev->has_uvd) {
+		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	}
+
+	si_tiling_mode_table_init(rdev);
+
+	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+		    rdev->config.si.max_sh_per_se,
+		    rdev->config.si.max_backends_per_se);
+
+	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+		     rdev->config.si.max_sh_per_se,
+		     rdev->config.si.max_cu_per_sh);
+
+	rdev->config.si.active_cus = 0;
+	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
+			rdev->config.si.active_cus +=
+				hweight32(si_get_cu_active_bitmap(rdev, i, j));
+		}
+	}
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_CONFIG, 0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+/*
+ * GPU scratch register helper functions.
+ */
+static void si_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+/*
+ * IB stuff
+ */
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
+	u32 header;
+
+	if (ib->is_const_ib) {
+		/* set switch buffer packet before const IB */
+		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		radeon_ring_write(ring, 0);
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	} else {
+		u32 next_rptr;
+		if (ring->rptr_save_reg) {
+			next_rptr = ring->wptr + 3 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+			radeon_ring_write(ring, ((ring->rptr_save_reg -
+						  PACKET3_SET_CONFIG_REG_START) >> 2));
+			radeon_ring_write(ring, next_rptr);
+		} else if (rdev->wb.enabled) {
+			next_rptr = ring->wptr + 5 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+			radeon_ring_write(ring, (1 << 8));
+			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+			radeon_ring_write(ring, next_rptr);
+		}
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+	}
+
+	radeon_ring_write(ring, header);
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
+
+	if (!ib->is_const_ib) {
+		/* flush read cache over gart for this vmid */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, vm_id);
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+				  PACKET3_TC_ACTION_ENA |
+				  PACKET3_SH_KCACHE_ACTION_ENA |
+				  PACKET3_SH_ICACHE_ACTION_ENA);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+	}
+}
+
+/*
+ * CP.
+ */
+static void si_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+	udelay(50);
+}
+
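+/* Halt the CP and load the PFP, CE and ME microcode. New-style firmware
+ * images carry a header and a little-endian payload; legacy images are
+ * raw big-endian arrays of fixed size.
+ */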
+static int si_cp_load_microcode(struct radeon_device *rdev)
+{
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+		return -EINVAL;
+
+	si_cp_enable(rdev, false);
+
+	if (rdev->new_fw) {
+		const struct gfx_firmware_header_v1_0 *pfp_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+		const struct gfx_firmware_header_v1_0 *ce_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+		const struct gfx_firmware_header_v1_0 *me_hdr =
+			(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+		const __le32 *fw_data;
+		u32 fw_size;
+
+		radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+		radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+		radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+		/* PFP */
+		fw_data = (const __le32 *)
+			(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+
+		/* CE */
+		fw_data = (const __le32 *)
+			(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_CE_UCODE_ADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_CE_UCODE_ADDR, 0);
+
+		/* ME */
+		fw_data = (const __le32 *)
+			(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+		WREG32(CP_ME_RAM_WADDR, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+		WREG32(CP_ME_RAM_WADDR, 0);
+	} else {
+		const __be32 *fw_data;
+
+		/* PFP */
+		fw_data = (const __be32 *)rdev->pfp_fw->data;
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+		for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+			WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_PFP_UCODE_ADDR, 0);
+
+		/* CE */
+		fw_data = (const __be32 *)rdev->ce_fw->data;
+		WREG32(CP_CE_UCODE_ADDR, 0);
+		for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+			WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_CE_UCODE_ADDR, 0);
+
+		/* ME */
+		fw_data = (const __be32 *)rdev->me_fw->data;
+		WREG32(CP_ME_RAM_WADDR, 0);
+		for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+			WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+		WREG32(CP_ME_RAM_WADDR, 0);
+	}
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int si_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	r = radeon_ring_lock(rdev, ring, 7 + 4);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	/* init the CP */
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+
+	/* init the CE partitions */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	radeon_ring_write(ring, 0xc000);
+	radeon_ring_write(ring, 0xe000);
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	si_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < si_default_size; i++)
+		radeon_ring_write(ring, si_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+		ring = &rdev->ring[i];
+		r = radeon_ring_lock(rdev, ring, 2);
+		if (r) {
+			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+			return r;
+		}
+
+		/* clear the compute context state */
+		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+		radeon_ring_write(ring, 0);
+
+		radeon_ring_unlock_commit(rdev, ring, false);
+	}
+
+	return 0;
+}
+
+static void si_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	si_cp_enable(rdev, false);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int si_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	si_enable_gui_idle_interrupt(rdev, false);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, 0);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+	/* ring1  - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB1_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB1_CNTL, tmp);
+
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+	/* ring2 - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB2_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB2_CNTL, tmp);
+
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+	/* start the rings */
+	si_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+
+	si_enable_gui_idle_interrupt(rdev, true);
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
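+/* Read the GRBM/SRBM/DMA/VM status registers and translate any busy or
+ * pending bits into a mask of RADEON_RESET_* blocks needing reset.
+ */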
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   BCI_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* GRBM_STATUS2 */
+	tmp = RREG32(GRBM_STATUS2);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	/* DMA_STATUS_REG 0 */
+	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* DMA_STATUS_REG 1 */
+	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	if (tmp & DMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	/* disable PG/CG */
+	si_fini_pg(rdev);
+	si_fini_cg(rdev);
+
+	/* stop the rlc */
+	si_rlc_stop(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	}
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+			SOFT_RESET_DB |
+			SOFT_RESET_GDS |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_BCI |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VGT |
+			SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_DMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		grbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (reset_mask & RADEON_RESET_MC)
+		srbm_soft_reset |= SOFT_RESET_MC;
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
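+/* Switch the engine and memory clocks to PLL bypass, used before the
+ * SPLL is powered down for a PCI config reset.
+ */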
+static void si_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+	u32 tmp, i;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_BYPASS_EN;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp |= SPLL_CTLREQ_CHG;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
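+/* Take software control of the SPLL, put it into reset and sleep, then
+ * hand control back to the hardware.
+ */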
+static void si_spll_powerdown(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp |= SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_RESET;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_SLEEP;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp &= ~SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static void si_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* disable cg/pg */
+	si_fini_pg(rdev);
+	si_fini_cg(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+	/* dma0 */
+	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	/* dma1 */
+	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc, disable cp internal ints */
+	si_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	/* set mclk/sclk to bypass */
+	si_set_clk_bypass_mode(rdev);
+	/* powerdown spll */
+	si_spll_powerdown(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
+int si_asic_reset(struct radeon_device *rdev, bool hard)
+{
+	u32 reset_mask;
+
+	if (hard) {
+		si_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	/* try soft reset */
+	si_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		si_gpu_pci_config_reset(rdev);
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* MC */
+static void si_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	if (!ASIC_IS_NODCE(rdev))
+		/* Lock out access through the VGA aperture */
+		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       rdev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       rdev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* we need to own VRAM, so turn off the VGA renderer here
+		 * to stop it from overwriting our objects */
+		rv515_vga_render_disable(rdev);
+	}
+}
+
+void si_vram_gtt_location(struct radeon_device *rdev,
+			  struct radeon_mc *mc)
+{
+	if (mc->mc_vram_size > 0xFFC0000000ULL) {
+		/* leave room for at least 1024M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xFFC0000000ULL;
+		mc->mc_vram_size = 0xFFC0000000ULL;
+	}
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	radeon_gtt_location(rdev, mc);
+}
+
+static int si_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* size in MB on si */
+	tmp = RREG32(CONFIG_MEMSIZE);
+	/* some boards may have garbage in the upper 16 bits */
+	if (tmp & 0xffff0000) {
+		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+		if (tmp & 0xffff)
+			tmp &= 0xffff;
+	}
+	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	si_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/*
+ * GART
+ */
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-15 are the VM contexts 0-15 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int si_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       BANK_SELECT(4) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application, set up and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			       rdev->vm_manager.saved_table_addr[i]);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
+			       rdev->vm_manager.saved_table_addr[i]);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	si_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void si_pcie_gart_disable(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void si_pcie_gart_fini(struct radeon_device *rdev)
+{
+	si_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+/* vm parser */
+static bool si_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* shader regs are also fine */
+	if (reg >= 0xB000 && reg < 0xC000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_ESGS_RING_SIZE:
+	case VGT_GSVS_RING_SIZE:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_TF_RING_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case VGT_TF_MEMORY_BASE:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQC_CACHES:
+	case SPI_STATIC_THREAD_MGMT_1:
+	case SPI_STATIC_THREAD_MGMT_2:
+	case SPI_STATIC_THREAD_MGMT_3:
+	case SPI_PS_MAX_WAVE_ID:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+	case TA_CS_BC_BASE_ADDR:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
+static int si_vm_packet3_ce_check(struct radeon_device *rdev,
+				  u32 *ib, struct radeon_cs_packet *pkt)
+{
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_SET_CE_DE_COUNTERS:
+	case PACKET3_LOAD_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM_OFFSET:
+	case PACKET3_DUMP_CONST_RAM:
+	case PACKET3_INCREMENT_CE_COUNTER:
+	case PACKET3_WAIT_ON_DE_COUNTER:
+	case PACKET3_CE_WRITE:
+		break;
+	default:
+		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
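+/* Validate a CP DMA packet for a VM submission.  The command dword is
+ * at ib[idx + 4]; when the SAS/DAS bits together with the info dword
+ * select a register source/destination, every register covered by the
+ * transfer (count in the low 21 bits of the command) must pass
+ * si_vm_reg_valid(), unless the SAIC/DAIC bits keep the address fixed.
+ */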
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+	u32 start_reg, reg, i;
+	u32 command = ib[idx + 4];
+	u32 info = ib[idx + 1];
+	u32 idx_value = ib[idx];
+	if (command & PACKET3_CP_DMA_CMD_SAS) {
+		/* src address space is register */
+		if (((info & 0x60000000) >> 29) == 0) {
+			start_reg = idx_value << 2;
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad SRC register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	if (command & PACKET3_CP_DMA_CMD_DAS) {
+		/* dst address space is register */
+		if (((info & 0x00300000) >> 20) == 0) {
+			start_reg = ib[idx + 2];
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad DST register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	return 0;
+}
+
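+/* Check a type 3 packet destined for the GFX ring against the
+ * whitelist of opcodes allowed in VM IBs; opcodes that can write
+ * registers have their targets validated with si_vm_reg_valid().
+ */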
+static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
+				   u32 *ib, struct radeon_cs_packet *pkt)
+{
+	int r;
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDIRECT_MULTI:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
+		break;
+	default:
+		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
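+/* Same as si_vm_packet3_gfx_check() but with the smaller opcode
+ * whitelist allowed on the compute rings.
+ */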
+static int si_vm_packet3_compute_check(struct radeon_device *rdev,
+				       u32 *ib, struct radeon_cs_packet *pkt)
+{
+	int r;
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
+		break;
+	default:
+		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
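+/**
+ * si_ib_parse - validate an indirect buffer for a VM submission
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to check
+ *
+ * Walks the IB packet by packet: type 0 packets are rejected, type 2
+ * packets are skipped, and type 3 packets are checked with the CE,
+ * GFX or compute rules depending on the target ring.  On error the
+ * whole IB is dumped with the offending dword marked (SI).
+ */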
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0, i;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case RADEON_PACKET_TYPE2:
+			idx += 1;
+			break;
+		case RADEON_PACKET_TYPE3:
+			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			if (ib->is_const_ib)
+				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
+			else {
+				switch (ib->ring) {
+				case RADEON_RING_TYPE_GFX_INDEX:
+					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
+					break;
+				case CAYMAN_RING_TYPE_CP1_INDEX:
+				case CAYMAN_RING_TYPE_CP2_INDEX:
+					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
+					break;
+				default:
+					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
+					ret = -EINVAL;
+					break;
+				}
+			}
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret) {
+			for (i = 0; i < ib->length_dw; i++) {
+				if (i == idx)
+					printk("\t0x%08x <---\n", ib->ptr[i]);
+				else
+					printk("\t0x%08x\n", ib->ptr[i]);
+			}
+			break;
+		}
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
+
+/*
+ * vm
+ */
+int si_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 16;
+	/* base offset of vram pages */
+	rdev->vm_manager.vram_base_offset = 0;
+
+	return 0;
+}
+
+void si_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * si_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human readable fault information (SI).
+ */
+static void si_vm_decode_fault(struct radeon_device *rdev,
+			       u32 status, u32 addr)
+{
+	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+	char *block;
+
+	if (rdev->family == CHIP_TAHITI) {
+		switch (mc_id) {
+		case 160:
+		case 144:
+		case 96:
+		case 80:
+		case 224:
+		case 208:
+		case 32:
+		case 16:
+			block = "CB";
+			break;
+		case 161:
+		case 145:
+		case 97:
+		case 81:
+		case 225:
+		case 209:
+		case 33:
+		case 17:
+			block = "CB_FMASK";
+			break;
+		case 162:
+		case 146:
+		case 98:
+		case 82:
+		case 226:
+		case 210:
+		case 34:
+		case 18:
+			block = "CB_CMASK";
+			break;
+		case 163:
+		case 147:
+		case 99:
+		case 83:
+		case 227:
+		case 211:
+		case 35:
+		case 19:
+			block = "CB_IMMED";
+			break;
+		case 164:
+		case 148:
+		case 100:
+		case 84:
+		case 228:
+		case 212:
+		case 36:
+		case 20:
+			block = "DB";
+			break;
+		case 165:
+		case 149:
+		case 101:
+		case 85:
+		case 229:
+		case 213:
+		case 37:
+		case 21:
+			block = "DB_HTILE";
+			break;
+		case 167:
+		case 151:
+		case 103:
+		case 87:
+		case 231:
+		case 215:
+		case 39:
+		case 23:
+			block = "DB_STEN";
+			break;
+		case 72:
+		case 68:
+		case 64:
+		case 8:
+		case 4:
+		case 0:
+		case 136:
+		case 132:
+		case 128:
+		case 200:
+		case 196:
+		case 192:
+			block = "TC";
+			break;
+		case 112:
+		case 48:
+			block = "CP";
+			break;
+		case 49:
+		case 177:
+		case 50:
+		case 178:
+			block = "SH";
+			break;
+		case 53:
+		case 190:
+			block = "VGT";
+			break;
+		case 117:
+			block = "IH";
+			break;
+		case 51:
+		case 115:
+			block = "RLC";
+			break;
+		case 119:
+		case 183:
+			block = "DMA0";
+			break;
+		case 61:
+			block = "DMA1";
+			break;
+		case 248:
+		case 120:
+			block = "HDP";
+			break;
+		default:
+			block = "unknown";
+			break;
+		}
+	} else {
+		switch (mc_id) {
+		case 32:
+		case 16:
+		case 96:
+		case 80:
+		case 160:
+		case 144:
+		case 224:
+		case 208:
+			block = "CB";
+			break;
+		case 33:
+		case 17:
+		case 97:
+		case 81:
+		case 161:
+		case 145:
+		case 225:
+		case 209:
+			block = "CB_FMASK";
+			break;
+		case 34:
+		case 18:
+		case 98:
+		case 82:
+		case 162:
+		case 146:
+		case 226:
+		case 210:
+			block = "CB_CMASK";
+			break;
+		case 35:
+		case 19:
+		case 99:
+		case 83:
+		case 163:
+		case 147:
+		case 227:
+		case 211:
+			block = "CB_IMMED";
+			break;
+		case 36:
+		case 20:
+		case 100:
+		case 84:
+		case 164:
+		case 148:
+		case 228:
+		case 212:
+			block = "DB";
+			break;
+		case 37:
+		case 21:
+		case 101:
+		case 85:
+		case 165:
+		case 149:
+		case 229:
+		case 213:
+			block = "DB_HTILE";
+			break;
+		case 39:
+		case 23:
+		case 103:
+		case 87:
+		case 167:
+		case 151:
+		case 231:
+		case 215:
+			block = "DB_STEN";
+			break;
+		case 72:
+		case 68:
+		case 8:
+		case 4:
+		case 136:
+		case 132:
+		case 200:
+		case 196:
+			block = "TC";
+			break;
+		case 112:
+		case 48:
+			block = "CP";
+			break;
+		case 49:
+		case 177:
+		case 50:
+		case 178:
+			block = "SH";
+			break;
+		case 53:
+			block = "VGT";
+			break;
+		case 117:
+			block = "IH";
+			break;
+		case 51:
+		case 115:
+			block = "RLC";
+			break;
+		case 119:
+		case 183:
+			block = "DMA0";
+			break;
+		case 61:
+			block = "DMA1";
+			break;
+		case 248:
+		case 120:
+			block = "HDP";
+			break;
+		default:
+			block = "unknown";
+			break;
+		}
+	}
+
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	       protections, vmid, addr,
+	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+	       block, mc_id);
+}
+
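+/**
+ * si_vm_flush - flush the TLB for a VM via the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to emit the flush on
+ * @vm_id: VM context id (0-15)
+ * @pd_addr: address of the new page directory
+ *
+ * Writes the new page table base for @vm_id, flushes the HDP cache,
+ * requests a TLB invalidate for the context and waits for it to
+ * complete before syncing the PFP to the ME (SI).
+ */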
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		 unsigned vm_id, uint64_t pd_addr)
+{
+	/* write new base address */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+
+	if (vm_id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+/*
+ *  Power and clock gating
+ */
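+/* Poll both RLC serdes busy registers until they read idle or the
+ * usec timeout expires.
+ */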
+static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					 bool enable)
+{
+	u32 tmp = RREG32(CP_INT_CNTL_RING0);
+	u32 mask;
+	int i;
+
+	if (enable)
+		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	else
+		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+
+	if (!enable) {
+		/* read a gfx register */
+		tmp = RREG32(DB_DEPTH_INFO);
+
+		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static void si_set_uvd_dcm(struct radeon_device *rdev,
+			   bool sw_mode)
+{
+	u32 tmp, tmp2;
+
+	tmp = RREG32(UVD_CGC_CTRL);
+	tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
+	tmp |= DCM | CG_DT(1) | CLK_OD(4);
+
+	if (sw_mode) {
+		tmp &= ~0x7ffff800;
+		tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
+	} else {
+		tmp |= 0x7ffff800;
+		tmp2 = 0;
+	}
+
+	WREG32(UVD_CGC_CTRL, tmp);
+	WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
+}
+
+void si_init_uvd_internal_cg(struct radeon_device *rdev)
+{
+	bool hw_mode = true;
+
+	if (hw_mode) {
+		si_set_uvd_dcm(rdev, false);
+	} else {
+		u32 tmp = RREG32(UVD_CGC_CTRL);
+		tmp &= ~DCM;
+		WREG32(UVD_CGC_CTRL, tmp);
+	}
+}
+
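+/* Disable the RLC and wait for the serdes to go idle.  Returns the
+ * previous RLC_CNTL value so the caller can restore it with
+ * si_update_rlc().
+ */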
+static u32 si_halt_rlc(struct radeon_device *rdev)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_CNTL);
+
+	if (data & RLC_ENABLE) {
+		data &= ~RLC_ENABLE;
+		WREG32(RLC_CNTL, data);
+
+		si_wait_for_rlc_serdes(rdev);
+	}
+
+	return orig;
+}
+
+static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_CNTL);
+	if (tmp != rlc)
+		WREG32(RLC_CNTL, rlc);
+}
+
+static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(DMA_PG);
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
+		data |= PG_CNTL_ENABLE;
+	else
+		data &= ~PG_CNTL_ENABLE;
+	if (orig != data)
+		WREG32(DMA_PG, data);
+}
+
+static void si_init_dma_pg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(DMA_PGFSM_WRITE,  0x00002000);
+	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+	for (tmp = 0; tmp < 5; tmp++)
+		WREG32(DMA_PGFSM_WRITE, 0);
+}
+
+static void si_enable_gfx_cgpg(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 tmp;
+
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
+		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+		WREG32(RLC_TTOP_D, tmp);
+
+		tmp = RREG32(RLC_PG_CNTL);
+		tmp |= GFX_PG_ENABLE;
+		WREG32(RLC_PG_CNTL, tmp);
+
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp |= AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+	} else {
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp &= ~AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+
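+		/* dummy read of a gfx register */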
+		tmp = RREG32(DB_RENDER_CONTROL);
+	}
+}
+
+static void si_init_gfx_cgpg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_PG_CNTL);
+	tmp |= GFX_PG_SRC;
+	WREG32(RLC_PG_CNTL, tmp);
+
+	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_AUTO_PG_CTRL);
+
+	tmp &= ~GRBM_REG_SGIT_MASK;
+	tmp |= GRBM_REG_SGIT(0x700);
+	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+	WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
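+/* Build a bitmap of the active CUs in the given SE/SH: the config
+ * registers carry the inactive-CU bits in their upper 16 bits, so the
+ * two values are merged, shifted down and inverted, then masked to
+ * max_cu_per_sh bits.
+ */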
+static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
+{
+	u32 mask = 0, tmp, tmp1;
+	int i;
+
+	si_select_se_sh(rdev, se, sh);
+	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	tmp &= 0xffff0000;
+
+	tmp |= tmp1;
+	tmp >>= 16;
+
+	for (i = 0; i < rdev->config.si.max_cu_per_sh; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+
+	return (~tmp) & mask;
+}
+
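+/* Program the always-on CU mask: at most two active CUs per SH are
+ * marked always-on, and RLC_MAX_PG_CU is set to the total number of
+ * active CUs found.
+ */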
+static void si_init_ao_cu_mask(struct radeon_device *rdev)
+{
+	u32 i, j, k, active_cu_number = 0;
+	u32 mask, counter, cu_bitmap;
+	u32 tmp = 0;
+
+	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
+			mask = 1;
+			cu_bitmap = 0;
+			counter  = 0;
+			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
+				if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
+					if (counter < 2)
+						cu_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+
+			active_cu_number += counter;
+			tmp |= (cu_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+	tmp = RREG32(RLC_MAX_PG_CU);
+	tmp &= ~MAX_PU_CU_MASK;
+	tmp |= MAX_PU_CU(active_cu_number);
+	WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void si_enable_cgcg(struct radeon_device *rdev,
+			   bool enable)
+{
+	u32 data, orig, tmp;
+
+	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+		si_enable_gui_idle_interrupt(rdev, true);
+
+		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+		tmp = si_halt_rlc(rdev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+		si_wait_for_rlc_serdes(rdev);
+
+		si_update_rlc(rdev, tmp);
+
+		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+		data |= CGCG_EN | CGLS_EN;
+	} else {
+		si_enable_gui_idle_interrupt(rdev, false);
+
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+
+		data &= ~(CGCG_EN | CGLS_EN);
+	}
+
+	if (orig != data)
+		WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void si_enable_mgcg(struct radeon_device *rdev,
+			   bool enable)
+{
+	u32 data, orig, tmp = 0;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data = 0x96940200;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+			orig = data = RREG32(CP_MEM_SLP_CNTL);
+			data |= CP_MEM_LS_EN;
+			if (orig != data)
+				WREG32(CP_MEM_SLP_CNTL, data);
+		}
+
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data &= 0xffffffc0;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		tmp = si_halt_rlc(rdev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+		si_update_rlc(rdev, tmp);
+	} else {
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data |= 0x00000003;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		data = RREG32(CP_MEM_SLP_CNTL);
+		if (data & CP_MEM_LS_EN) {
+			data &= ~CP_MEM_LS_EN;
+			WREG32(CP_MEM_SLP_CNTL, data);
+		}
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data |= LS_OVERRIDE | OVERRIDE;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		tmp = si_halt_rlc(rdev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+		si_update_rlc(rdev, tmp);
+	}
+}
+
+static void si_enable_uvd_mgcg(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 orig, data, tmp;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
+		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+		tmp |= 0x3fff;
+		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
+
+		orig = data = RREG32(UVD_CGC_CTRL);
+		data |= DCM;
+		if (orig != data)
+			WREG32(UVD_CGC_CTRL, data);
+
+		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
+		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
+	} else {
+		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+		tmp &= ~0x3fff;
+		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
+
+		orig = data = RREG32(UVD_CGC_CTRL);
+		data &= ~DCM;
+		if (orig != data)
+			WREG32(UVD_CGC_CTRL, data);
+
+		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
+		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
+	}
+}
+
+static const u32 mc_cg_registers[] =
+{
+	MC_HUB_MISC_HUB_CG,
+	MC_HUB_MISC_SIP_CG,
+	MC_HUB_MISC_VM_CG,
+	MC_XPB_CLK_GAT,
+	ATC_MISC_CG,
+	MC_CITF_MISC_WR_CG,
+	MC_CITF_MISC_RD_CG,
+	MC_CITF_MISC_VM_CG,
+	VM_L2_CG,
+};
+
+static void si_enable_mc_ls(struct radeon_device *rdev,
+			    bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
+			data |= MC_LS_ENABLE;
+		else
+			data &= ~MC_LS_ENABLE;
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void si_enable_mc_mgcg(struct radeon_device *rdev,
+			       bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+			data |= MC_CG_ENABLE;
+		else
+			data &= ~MC_CG_ENABLE;
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void si_enable_dma_mgcg(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 orig, data, offset;
+	int i;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+		for (i = 0; i < 2; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data &= ~MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+		}
+	} else {
+		for (i = 0; i < 2; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data |= MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+
+			orig = data = RREG32(DMA_CLK_CTRL + offset);
+			data = 0xff000000;
+			if (data != orig)
+				WREG32(DMA_CLK_CTRL + offset, data);
+		}
+	}
+}
+
+static void si_enable_bif_mgls(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32_PCIE(PCIE_CNTL2);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+	else
+		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+	if (orig != data)
+		WREG32_PCIE(PCIE_CNTL2, data);
+}
+
+static void si_enable_hdp_mgcg(struct radeon_device *rdev,
+			       bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+		data &= ~CLOCK_GATING_DIS;
+	else
+		data |= CLOCK_GATING_DIS;
+
+	if (orig != data)
+		WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void si_enable_hdp_ls(struct radeon_device *rdev,
+			     bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_MEM_POWER_LS);
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+		data |= HDP_LS_ENABLE;
+	else
+		data &= ~HDP_LS_ENABLE;
+
+	if (orig != data)
+		WREG32(HDP_MEM_POWER_LS, data);
+}
+
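+/* Enable or disable clock gating for the blocks in @block.  GFX gating
+ * is bracketed by masking the gui idle interrupt, and MGCG/CGCG are
+ * toggled in opposite orders for enable vs disable.
+ */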
+static void si_update_cg(struct radeon_device *rdev,
+			 u32 block, bool enable)
+{
+	if (block & RADEON_CG_BLOCK_GFX) {
+		si_enable_gui_idle_interrupt(rdev, false);
+		/* order matters! */
+		if (enable) {
+			si_enable_mgcg(rdev, true);
+			si_enable_cgcg(rdev, true);
+		} else {
+			si_enable_cgcg(rdev, false);
+			si_enable_mgcg(rdev, false);
+		}
+		si_enable_gui_idle_interrupt(rdev, true);
+	}
+
+	if (block & RADEON_CG_BLOCK_MC) {
+		si_enable_mc_mgcg(rdev, enable);
+		si_enable_mc_ls(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_SDMA) {
+		si_enable_dma_mgcg(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_BIF) {
+		si_enable_bif_mgls(rdev, enable);
+	}
+
+	if (block & RADEON_CG_BLOCK_UVD) {
+		if (rdev->has_uvd) {
+			si_enable_uvd_mgcg(rdev, enable);
+		}
+	}
+
+	if (block & RADEON_CG_BLOCK_HDP) {
+		si_enable_hdp_mgcg(rdev, enable);
+		si_enable_hdp_ls(rdev, enable);
+	}
+}
+
+static void si_init_cg(struct radeon_device *rdev)
+{
+	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+			    RADEON_CG_BLOCK_MC |
+			    RADEON_CG_BLOCK_SDMA |
+			    RADEON_CG_BLOCK_BIF |
+			    RADEON_CG_BLOCK_HDP), true);
+	if (rdev->has_uvd) {
+		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+		si_init_uvd_internal_cg(rdev);
+	}
+}
+
+static void si_fini_cg(struct radeon_device *rdev)
+{
+	if (rdev->has_uvd) {
+		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
+	}
+	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+			    RADEON_CG_BLOCK_MC |
+			    RADEON_CG_BLOCK_SDMA |
+			    RADEON_CG_BLOCK_BIF |
+			    RADEON_CG_BLOCK_HDP), false);
+}
+
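+/* Size, in dwords, of the clear state buffer built by
+ * si_get_csb_buffer(): begin/end preambles, context control, every
+ * SECT_CONTEXT register extent, the pa_sc_raster_config write and the
+ * final clear state packet.
+ */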
+u32 si_get_csb_size(struct radeon_device *rdev)
+{
+	u32 count = 0;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (rdev->rlc.cs_data == NULL)
+		return 0;
+
+	/* begin clear state */
+	count += 2;
+	/* context control state */
+	count += 3;
+
+	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT)
+				count += 2 + ext->reg_count;
+			else
+				return 0;
+		}
+	}
+	/* pa_sc_raster_config */
+	count += 3;
+	/* end clear state */
+	count += 2;
+	/* clear state */
+	count += 2;
+
+	return count;
+}
+
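+/* Fill @buffer (sized by si_get_csb_size()) with the clear state
+ * commands; PA_SC_RASTER_CONFIG gets a per-asic value.
+ */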
+void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (rdev->rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		buffer[count++] = cpu_to_le32(0x2a00126a);
+		break;
+	case CHIP_VERDE:
+		buffer[count++] = cpu_to_le32(0x0000124a);
+		break;
+	case CHIP_OLAND:
+		buffer[count++] = cpu_to_le32(0x00000082);
+		break;
+	case CHIP_HAINAN:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void si_init_pg(struct radeon_device *rdev)
+{
+	if (rdev->pg_flags) {
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
+			si_init_dma_pg(rdev);
+		}
+		si_init_ao_cu_mask(rdev);
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
+			si_init_gfx_cgpg(rdev);
+		} else {
+			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+		}
+		si_enable_dma_pg(rdev, true);
+		si_enable_gfx_cgpg(rdev, true);
+	} else {
+		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+	}
+}
+
+static void si_fini_pg(struct radeon_device *rdev)
+{
+	if (rdev->pg_flags) {
+		si_enable_dma_pg(rdev, false);
+		si_enable_gfx_cgpg(rdev, false);
+	}
+}
+
+/*
+ * RLC
+ */
+void si_rlc_reset(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+	tmp |= SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+	tmp &= ~SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+}
+
+static void si_rlc_stop(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, 0);
+
+	si_enable_gui_idle_interrupt(rdev, false);
+
+	si_wait_for_rlc_serdes(rdev);
+}
+
+static void si_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+
+	si_enable_gui_idle_interrupt(rdev, true);
+
+	udelay(50);
+}
+
+static bool si_lbpw_supported(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Enable LBPW only for DDR3 */
+	tmp = RREG32(MC_SEQ_MISC0);
+	if ((tmp & 0xF0000000) == 0xB0000000)
+		return true;
+	return false;
+}
+
+static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_LB_CNTL);
+	if (enable)
+		tmp |= LOAD_BALANCE_ENABLE;
+	else
+		tmp &= ~LOAD_BALANCE_ENABLE;
+	WREG32(RLC_LB_CNTL, tmp);
+
+	if (!enable) {
+		si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+		WREG32(SPI_LB_CU_MASK, 0x00ff);
+	}
+}
+
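+/* Reset the RLC, reinitialize PG and CG state, then upload the RLC
+ * ucode: new-style firmware has a header and little-endian words,
+ * legacy firmware is a raw big-endian blob of SI_RLC_UCODE_SIZE
+ * dwords.
+ */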
+static int si_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	si_rlc_stop(rdev);
+
+	si_rlc_reset(rdev);
+
+	si_init_pg(rdev);
+
+	si_init_cg(rdev);
+
+	WREG32(RLC_RL_BASE, 0);
+	WREG32(RLC_RL_SIZE, 0);
+	WREG32(RLC_LB_CNTL, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+	WREG32(RLC_LB_CNTR_INIT, 0);
+	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	if (rdev->new_fw) {
+		const struct rlc_firmware_header_v1_0 *hdr =
+			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+		u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		const __le32 *fw_data = (const __le32 *)
+			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		radeon_ucode_print_rlc_hdr(&hdr->header);
+
+		for (i = 0; i < fw_size; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+		}
+	} else {
+		const __be32 *fw_data =
+			(const __be32 *)rdev->rlc_fw->data;
+		for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	si_enable_lbpw(rdev, si_lbpw_supported(rdev));
+
+	si_rlc_start(rdev);
+
+	return 0;
+}
+
+static void si_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+static void si_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void si_disable_interrupt_state(struct radeon_device *rdev)
+{
+	int i;
+	u32 tmp;
+
+	tmp = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+	WREG32(CP_INT_CNTL_RING1, 0);
+	WREG32(CP_INT_CNTL_RING2, 0);
+	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(INT_MASK + crtc_offsets[i], 0);
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+
+		for (i = 0; i < 6; i++)
+			WREG32_AND(DC_HPDx_INT_CONTROL(i),
+				   DC_HPDx_INT_POLARITY);
+	}
+}
+
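+/* One-time IH setup: allocate the ring, bring up the RLC, program the
+ * ring base, size and writeback address, then enable interrupts with
+ * all sources still masked.
+ */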
+static int si_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	si_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = si_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	si_disable_interrupt_state(rdev);
+
+	pci_set_master(rdev->pdev);
+
+	/* enable irqs */
+	si_enable_interrupts(rdev);
+
+	return ret;
+}
+
+/* The order we write back each register here is important */
+int si_irq_set(struct radeon_device *rdev)
+{
+	int i;
+	u32 cp_int_cntl;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 dma_cntl, dma_cntl1;
+	u32 thermal_int = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		si_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		si_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
+	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
+	thermal_int = RREG32(CG_THERMAL_INT) &
+		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+
+	/* enable CP interrupts on all rings */
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int gfx\n");
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp1\n");
+		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp2\n");
+		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma1\n");
+		dma_cntl1 |= TRAP_ENABLE;
+	}
+
+	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
+	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	if (rdev->irq.dpm_thermal) {
+		DRM_DEBUG("dpm thermal\n");
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		radeon_irq_kms_set_irq_n_enabled(
+		    rdev, INT_MASK + crtc_offsets[i], VBLANK_INT_MASK,
+		    rdev->irq.crtc_vblank_int[i] ||
+		    atomic_read(&rdev->irq.pflip[i]), "vblank", i);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		for (i = 0; i < 6; i++) {
+			radeon_irq_kms_set_irq_n_enabled(
+			    rdev, DC_HPDx_INT_CONTROL(i),
+			    DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
+			    rdev->irq.hpd[i], "HPD", i);
+		}
+	}
+
+	WREG32(CG_THERMAL_INT, thermal_int);
+
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
+	return 0;
+}
+
+/* The order we write back each register here is important */
+static inline void si_irq_ack(struct radeon_device *rdev)
+{
+	int i, j;
+	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
+	u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
+
+	if (ASIC_IS_NODCE(rdev))
+		return;
+
+	for (i = 0; i < 6; i++) {
+		disp_int[i] = RREG32(si_disp_int_status[i]);
+		if (i < rdev->num_crtc)
+			grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
+	}
+
+	/* We write back each interrupt register in pairs */
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		for (j = i; j < (i + 2); j++) {
+			if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
+				WREG32(GRPH_INT_STATUS + crtc_offsets[j],
+				       GRPH_PFLIP_INT_CLEAR);
+		}
+
+		for (j = i; j < (i + 2); j++) {
+			if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
+				WREG32(VBLANK_STATUS + crtc_offsets[j],
+				       VBLANK_ACK);
+			if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
+				WREG32(VLINE_STATUS + crtc_offsets[j],
+				       VLINE_ACK);
+		}
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (disp_int[i] & DC_HPD1_INTERRUPT)
+			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
+			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
+	}
+}
+
+static void si_irq_disable(struct radeon_device *rdev)
+{
+	si_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	si_irq_ack(rdev);
+	si_disable_interrupt_state(rdev);
+}
+
+static void si_irq_suspend(struct radeon_device *rdev)
+{
+	si_irq_disable(rdev);
+	si_rlc_stop(rdev);
+}
+
+static void si_irq_fini(struct radeon_device *rdev)
+{
+	si_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/* SI IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ * [79:72]  - VMID
+ * [127:80] - reserved
+ */
+int si_irq_process(struct radeon_device *rdev)
+{
+	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
+	u32 crtc_idx, hpd_idx;
+	u32 mask;
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data, ring_id;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_dp = false;
+	bool queue_thermal = false;
+	u32 status, addr;
+	const char *event_name;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	si_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+		case 2: /* D2 vblank/vline */
+		case 3: /* D3 vblank/vline */
+		case 4: /* D4 vblank/vline */
+		case 5: /* D5 vblank/vline */
+		case 6: /* D6 vblank/vline */
+			crtc_idx = src_id - 1;
+
+			if (src_data == 0) { /* vblank */
+				mask = LB_D1_VBLANK_INTERRUPT;
+				event_name = "vblank";
+
+				if (rdev->irq.crtc_vblank_int[crtc_idx]) {
+					drm_handle_vblank(rdev->ddev, crtc_idx);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
+					radeon_crtc_handle_vblank(rdev,
+								  crtc_idx);
+				}
+
+			} else if (src_data == 1) { /* vline */
+				mask = LB_D1_VLINE_INTERRUPT;
+				event_name = "vline";
+			} else {
+				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+					  src_id, src_data);
+				break;
+			}
+
+			if (!(disp_int[crtc_idx] & mask)) {
+				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
+					  crtc_idx + 1, event_name);
+			}
+
+			disp_int[crtc_idx] &= ~mask;
+			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
+
+			break;
+		case 8: /* D1 page flip */
+		case 10: /* D2 page flip */
+		case 12: /* D3 page flip */
+		case 14: /* D4 page flip */
+		case 16: /* D5 page flip */
+		case 18: /* D6 page flip */
+			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+			break;
+		case 42: /* HPD hotplug */
+			if (src_data <= 5) {
+				hpd_idx = src_data;
+				mask = DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				event_name = "HPD";
+
+			} else if (src_data <= 11) {
+				hpd_idx = src_data - 6;
+				mask = DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				event_name = "HPD_RX";
+
+			} else {
+				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+					  src_id, src_data);
+				break;
+			}
+
+			if (!(disp_int[hpd_idx] & mask))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			disp_int[hpd_idx] &= ~mask;
+			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
+			break;
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 146:
+		case 147:
+			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			if (addr == 0x0 && status == 0x0)
+				break;
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				addr);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				status);
+			si_vm_decode_fault(rdev, status, addr);
+			break;
+		case 176: /* RINGID0 CP_INT */
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 177: /* RINGID1 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+			break;
+		case 178: /* RINGID2 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			switch (ring_id) {
+			case 0:
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+				break;
+			case 2:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+				break;
+			}
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA trap event */
+			DRM_DEBUG("IH: DMA1 trap\n");
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
+	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
+	if (queue_hotplug)
+		schedule_delayed_work(&rdev->hotplug_work, 0);
+	if (queue_thermal && rdev->pm.dpm_enabled)
+		schedule_work(&rdev->pm.dpm.thermal.work);
+	rdev->ih.rptr = rptr;
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = si_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * startup/shutdown callbacks
+ */
+static void si_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early, so nothing happens there.
+		 * It is pointless to go through that code, hence we
+		 * disable uvd here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void si_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void si_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void si_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * si_vce_start() fail early, so nothing happens there.
+		 * It is pointless to go through that code, hence we
+		 * disable vce here.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void si_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void si_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
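+/* Common hw bring-up shared by init and resume: MC, GART, RLC,
+ * writeback, fences, IRQs, the CP and DMA rings and the optional
+ * UVD/VCE blocks, in that order.
+ */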
+static int si_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2/3 link */
+	si_pcie_gen3_enable(rdev);
+	/* enable aspm */
+	si_program_aspm(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	si_mc_program(rdev);
+
+	if (!rdev->pm.dpm_enabled) {
+		r = si_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = si_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	si_gpu_init(rdev);
+
+	/* allocate rlc buffers */
+	if (rdev->family == CHIP_VERDE) {
+		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size =
+			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+	}
+	rdev->rlc.cs_data = si_cs_data;
+	r = sumo_rlc_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	si_uvd_start(rdev);
+	si_vce_start(rdev);
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = si_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	si_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+			     RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = si_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = si_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	si_uvd_resume(rdev);
+	si_vce_resume(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_audio_init(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+int si_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on rv770 hw, unlike on
+	 * r500 hw, posting will perform the necessary tasks to bring
+	 * the GPU back into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	si_init_golden_registers(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		DRM_ERROR("si startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int si_suspend(struct radeon_device *rdev)
+{
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	si_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	if (rdev->has_vce)
+		radeon_vce_suspend(rdev);
+	si_fini_pg(rdev);
+	si_fini_cg(rdev);
+	si_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	si_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does pretty much
+ * nothing more than call asic specific functions.  This should
+ * also allow removing a bunch of callback functions like
+ * vram_info.
+ */
+int si_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	si_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	si_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+
+	/* initialize memory controller */
+	r = si_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+	    !rdev->rlc_fw || !rdev->mc_fw) {
+		r = si_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	si_uvd_init(rdev);
+	si_vce_init(rdev);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		si_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		si_irq_fini(rdev);
+		sumo_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		si_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (!rdev->mc_fw) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void si_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	si_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	si_fini_pg(rdev);
+	si_fini_cg(rdev);
+	si_irq_fini(rdev);
+	sumo_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_fini(rdev);
+	}
+	if (rdev->has_vce)
+		radeon_vce_fini(rdev);
+	si_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/**
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
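+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): the counter
+ * is free-running, so elapsed GPU clocks over an interval can be sampled
+ * with two snapshots:
+ *
+ *	uint64_t t0 = si_get_gpu_clock_counter(rdev);
+ *	... run workload ...
+ *	uint64_t t1 = si_get_gpu_clock_counter(rdev);
+ *	uint64_t elapsed = t1 - t0;
+ *
+ * gpu_clock_mutex serializes each capture/readback pair, but it does not
+ * make two separate snapshots atomic with respect to each other.
+ */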
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode */
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	/* set RESET_ANTI_MUX to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* set VCO_MODE to 1 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+	/* disable sleep mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+	/* deassert UPLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert UPLL_RESET again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+	if (fb_div < 307200)
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+	else
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
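+
+/*
+ * Sketch of the reprogramming protocol used above (derived from the code;
+ * the delays are the ones used there, not documented hardware minimums):
+ *
+ *	1. route VCLK/DCLK from bclk and put the PLL in bypass, so the
+ *	   consumers keep a valid clock while the PLL is reprogrammed;
+ *	2. briefly release reset and latch the state with
+ *	   radeon_uvd_send_upll_ctlreq();
+ *	3. re-assert reset and program spread spectrum, the feedback
+ *	   divider, the reference divider and the post dividers;
+ *	4. release reset, give the PLL ~15 ms to lock, leave bypass and
+ *	   latch again;
+ *	5. switch VCLK/DCLK back to the PLL outputs.
+ */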
+
+static void si_pcie_gen3_enable(struct radeon_device *rdev)
+{
+	struct pci_dev *root = rdev->pdev->bus->self;
+	enum pci_bus_speed speed_cap;
+	int bridge_pos, gpu_pos;
+	u32 speed_cntl, current_data_rate;
+	int i;
+	u16 tmp16;
+
+	if (pci_is_root_bus(rdev->pdev->bus))
+		return;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	speed_cap = pcie_get_speed_cap(root);
+	if (speed_cap == PCI_SPEED_UNKNOWN)
+		return;
+
+	if ((speed_cap != PCIE_SPEED_8_0GT) &&
+	    (speed_cap != PCIE_SPEED_5_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+		LC_CURRENT_DATA_RATE_SHIFT;
+	if (speed_cap == PCIE_SPEED_8_0GT) {
+		if (current_data_rate == 2) {
+			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
+	} else if (speed_cap == PCIE_SPEED_5_0GT) {
+		if (current_data_rate == 1) {
+			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+	}
+
+	bridge_pos = pci_pcie_cap(root);
+	if (!bridge_pos)
+		return;
+
+	gpu_pos = pci_pcie_cap(rdev->pdev);
+	if (!gpu_pos)
+		return;
+
+	if (speed_cap == PCIE_SPEED_8_0GT) {
+		/* re-try equalization if gen3 is not already enabled */
+		if (current_data_rate != 2) {
+			u16 bridge_cfg, gpu_cfg;
+			u16 bridge_cfg2, gpu_cfg2;
+			u32 max_lw, current_lw, tmp;
+
+			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+			if (current_lw < max_lw) {
+				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+				if (tmp & LC_RENEGOTIATION_SUPPORT) {
+					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
+			for (i = 0; i < 10; i++) {
+				/* check status */
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_REDO_EQ;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				/* linkctl */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				/* linkctl2 */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp &= ~LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	/* set the link speed */
+	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~0xf;
+	if (speed_cap == PCIE_SPEED_8_0GT)
+		tmp16 |= 3; /* gen3 */
+	else if (speed_cap == PCIE_SPEED_5_0GT)
+		tmp16 |= 2; /* gen2 */
+	else
+		tmp16 |= 1; /* gen1 */
+	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void si_program_aspm(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (radeon_aspm == 0)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+	data &= ~LC_XMIT_N_FTS_MASK;
+	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+	data |= LC_GO_TO_RECOVERY;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE(PCIE_P_CNTL);
+	data |= P_IGNORE_EDB_ERR;
+	if (orig != data)
+		WREG32_PCIE(PCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	data |= LC_PMI_TO_L1_DIS;
+	if (!disable_l0s)
+		data |= LC_L0S_INACTIVITY(7);
+
+	if (!disable_l1) {
+		data |= LC_L1_INACTIVITY(7);
+		data &= ~LC_PMI_TO_L1_DIS;
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
+			}
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (orig != data)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
+
+			if (!disable_clkreq &&
+			    !pci_is_root_bus(rdev->pdev->bus)) {
+				struct pci_dev *root = rdev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+				orig = data = RREG32(THM_CLK_CNTL);
+				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+				if (orig != data)
+					WREG32(THM_CLK_CNTL, data);
+
+				orig = data = RREG32(MISC_CLK_CNTL);
+				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+				if (orig != data)
+					WREG32(MISC_CLK_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL);
+				data &= ~BCLK_AS_XCLK;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL_2);
+				data &= ~FORCE_BIF_REFCLK_EN;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_CLKOUT_SEL_MASK;
+				data |= MPLL_CLKOUT_SEL(4);
+				if (orig != data)
+					WREG32(MPLL_BYPASSCLK_SEL, data);
+
+				orig = data = RREG32(SPLL_CNTL_MODE);
+				data &= ~SPLL_REFCLK_SEL_MASK;
+				if (orig != data)
+					WREG32(SPLL_CNTL_MODE, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE(PCIE_CNTL2);
+	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+	if (orig != data)
+		WREG32_PCIE(PCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+			data = RREG32_PCIE(PCIE_LC_STATUS1);
+			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+				data &= ~LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
+
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	/* make sure VCEPLL_CTLREQ is deasserted */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* assert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < 100; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* deassert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == 100) {
+		DRM_ERROR("Timeout setting VCE clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
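+
+/*
+ * Handshake note (a sketch of the protocol as implemented above): CTLREQ
+ * and CTLACK form a request/acknowledge pair; the driver raises CTLREQ,
+ * polls for up to ~1s (100 x 10ms) until both acknowledge bits are set,
+ * then drops CTLREQ again. -ETIMEDOUT means the acknowledge never came.
+ */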
+
+int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
+	int r;
+
+	/* bypass evclk and ecclk with bclk */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
+		     ~VCEPLL_BYPASS_EN_MASK);
+
+	if (!evclk || !ecclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+			     ~VCEPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &evclk_div, &ecclk_div);
+	if (r)
+		return r;
+
+	/* set RESET_ANTI_MUX to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* set VCO_MODE to 1 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
+		     ~VCEPLL_VCO_MODE_MASK);
+
+	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+		     ~VCEPLL_SLEEP_MASK);
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
+
+	/* deassert VCEPLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_vce_send_vcepll_ctlreq(rdev);
+	if (r)
+		return r;
+
+	/* assert VCEPLL_RESET again */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
+		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
+
+	r = si_vce_send_vcepll_ctlreq(rdev);
+	if (r)
+		return r;
+
+	/* switch EVCLK and ECCLK selection */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.c b/drivers/gpu/drm/radeon/si_blit_shaders.c
new file mode 100644
index 0000000..ec415e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_blit_shaders.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
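+/*
+ * Format note (a sketch, assuming the usual PM4 type-3 PACKET3 encoding;
+ * the table itself is the authoritative content):
+ *
+ *	header = (3 << 30) | (count << 16) | (opcode << 8)
+ *
+ * Each group below is such a header, then a register offset, then the
+ * register values. 0xc0066900 decodes as opcode 0x69 (SET_CONTEXT_REG)
+ * with count 6: one offset dword plus six values, which matches the
+ * first group (DB_RENDER_CONTROL .. DB_HTILE_DATA_BASE).
+ */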
+const u32 si_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0046900,
+	0x00000008,
+	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0046900,
+	0x00000100,
+	0xffffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+	0xc0046900,
+	0x00000105,
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 si_default_size = ARRAY_SIZE(si_default_state);
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.h b/drivers/gpu/drm/radeon/si_blit_shaders.h
new file mode 100644
index 0000000..c739e51
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SI_BLIT_SHADERS_H
+#define SI_BLIT_SHADERS_H
+
+extern const u32 si_default_state[];
+
+extern const u32 si_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 0000000..8320792
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_trace.h"
+#include "sid.h"
+
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = si_gpu_check_soft_reset(rdev);
+	u32 mask;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		mask = RADEON_RESET_DMA;
+	else
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
+		radeon_ring_lockup_update(rdev, ring);
+		return false;
+	}
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
+		if (bytes > 0xFFFF8)
+			bytes = 0xFFFF8;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      1, 0, 0, bytes);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
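+
+/*
+ * Worked example (hypothetical numbers): one COPY packet moves at most
+ * 0xFFFF8 bytes, i.e. 0x1FFFF eight-byte PTEs. Updating 0x30000 PTEs
+ * therefore emits two packets, 0x1FFFF entries in the first and 0x10001
+ * in the second, at five command dwords per packet.
+ */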
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using the DMA (SI).
+ */
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
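+
+/*
+ * Layout note (a sketch of the packet as built above): a WRITE packet is
+ * three dwords of header plus destination address, followed by ndw data
+ * dwords at two per PTE, so one packet can write at most
+ * 0xFFFFE / 2 = 0x7FFFF page table entries.
+ */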
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
+}
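+
+/*
+ * Worked example (hypothetical numbers): the PTE_PDE packet carries a
+ * starting value and a stride instead of explicit data; successive
+ * entries advance by "incr". With addr = 0x100000, incr = 0x1000 and
+ * count = 4, the four entries cover 0x100000, 0x101000, 0x102000 and
+ * 0x103000 (combined with the flags mask) in nine command dwords, where
+ * a WRITE packet would need 3 + 8 = 11.
+ */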
+
+/**
+ * si_dma_vm_flush - flush the TLB for a given VM via the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @vm_id: VM id to flush
+ * @pd_addr: address of the page directory
+ *
+ * Update the page table base address for the requested VM and flush
+ * the TLB using SRBM writes on the DMA ring (SI).
+ */
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		     unsigned vm_id, uint64_t pd_addr)
+{
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm_id < 8) {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
+	} else {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
+	}
+	radeon_ring_write(ring, pd_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+	radeon_ring_write(ring, 0xff << 16); /* retry */
+	radeon_ring_write(ring, 1 << vm_id); /* mask */
+	radeon_ring_write(ring, 0); /* value */
+	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @resv: reservation object to sync to
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+				 uint64_t src_offset, uint64_t dst_offset,
+				 unsigned num_gpu_pages,
+				 struct reservation_object *resv)
+{
+	struct radeon_fence *fence;
+	struct radeon_sync sync;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes;
+	int i, num_loops;
+	int r = 0;
+
+	radeon_sync_create(&sync);
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_rings(rdev, &sync, ring->idx);
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0xFFFFF)
+			cur_size_in_bytes = 0xFFFFF;
+		size_in_bytes -= cur_size_in_bytes;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		radeon_sync_free(rdev, &sync, NULL);
+		return ERR_PTR(r);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+	radeon_sync_free(rdev, &sync, fence);
+
+	return fence;
+}
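+
+/*
+ * Ring-space accounting note: each 0xFFFFF-byte chunk costs the five
+ * dwords written in the loop above, which is why radeon_ring_lock()
+ * reserves num_loops * 5 dwords plus a fixed 11-dword allowance for the
+ * sync and fence emission.
+ */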
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
new file mode 100644
index 0000000..8fb60b3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -0,0 +1,7141 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END                 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ     1350
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_tahiti =
+{
+	((1 << 16) | 27027),
+	6,
+	0,
+	4,
+	95,
+	{
+		0UL,
+		0UL,
+		4521550UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		40
+	},
+	595000000UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_tahiti =
+{
+	{ 1159409, 0, 0, 0, 0 },
+	{ 777, 0, 0, 0, 0 },
+	2,
+	54000,
+	127000,
+	25,
+	2,
+	10,
+	13,
+	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+	85,
+	false
+};
+
+static const struct si_dte_data dte_data_tahiti_le =
+{
+	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+	0x5,
+	0xAFC8,
+	0x64,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+	85,
+	true
+};
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+	0x5,
+	0xAFC8,
+	0x69,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+	85,
+	true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+	{ 0xFFFFFFFF }
+};
+
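+/*
+ * si_powertune_data uses positional initializers; the field order
+ * (assumed from si_dpm.h) is: cac_window, l2_lta_window_size_default,
+ * lts_truncate_default, shift_n_default, operating_temp_default,
+ * leakage_coefficients { at, bt, av, bv, t_slope, t_intercept, t_ref },
+ * fixed_kt, lkge_lut_v0_percent, dc_cac[], enable_powertune_by_default.
+ */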
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+	((1 << 16) | 27027),
+	5,
+	0,
+	6,
+	100,
+	{
+		51600000UL,
+		1800000UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
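+/*
+ * si_dte_data is likewise positional; the assumed field order is:
+ * tau[], r[], k, t0, max_t, window_size, temp_select, dte_mode,
+ * tdep_count, t_limits[], tdep_tau[], tdep_r[], t_threshold,
+ * enable_dte_by_default.  An all-zero table ending in false, such as
+ * dte_data_pitcairn below, simply leaves the digital temperature
+ * estimator disabled on that part.
+ */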
+static const struct si_dte_data dte_data_pitcairn =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_venus_xtx =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	9,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
+struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
+struct ni_ps *ni_get_ps(struct radeon_ps *rps);
+
+extern int si_mc_load_microcode(struct radeon_device *rdev);
+extern void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable);
+
+static int si_populate_voltage_value(struct radeon_device *rdev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct radeon_device *rdev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage);
+static int si_write_smc_soft_register(struct radeon_device *rdev,
+				      u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct radeon_device *rdev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct radeon_device *rdev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev);
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
+
+static struct si_power_info *si_get_pi(struct radeon_device *rdev)
+{
+	struct si_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
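+/*
+ * Leakage model evaluated in drm 32.32 fixed point:
+ *
+ *   leakage = i_leakage * kt * kv * vddc
+ *   kt      = e^((t_slope * vddc + t_intercept) * T) /
+ *             e^((t_slope * vddc + t_intercept) * t_ref)
+ *   kv      = av * e^(bv * vddc)
+ *
+ * The coefficients are integers scaled by 1e8, v is in mV and t in
+ * millidegrees (presumably); the result is multiplied by 1000 before
+ * truncation, i.e. returned in thousandths of the model's power unit.
+ */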
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+						     u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+	s64 tmp;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+	temperature = div64_s64(drm_int2fixp(t), 1000);
+
+	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+	t_ref = drm_int2fixp(coeff->t_ref);
+
+	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
+					     const struct ni_leakage_coeffients *coeff,
+					     u16 v,
+					     s32 t,
+					     u32 i_leakage,
+					     u32 *leakage)
+{
+	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+					       const u32 fixed_kt, u16 v,
+					       u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+
+	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct radeon_device *rdev,
+				       const struct ni_leakage_coeffients *coeff,
+				       const u32 fixed_kt,
+				       u16 v,
+				       u32 i_leakage,
+				       u32 *leakage)
+{
+	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
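+/*
+ * Derive the DTE thermal-resistance entries from the PL2 (near-TDP)
+ * limit:
+ *
+ *   r[i] = t_split[i] * (max_t - t0/1000) * 2^14 / (pl2 * 100)
+ *
+ * Note that only t0 is divided by 1000, suggesting it is stored at a
+ * 1000x finer scale than max_t.  The loop assumes k <= 5 (the size of
+ * t_split); a PL2 of zero or above PL1 is rejected.
+ */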
+static void si_update_dte_from_pl2(struct radeon_device *rdev,
+				   struct si_dte_data *dte_data)
+{
+	u32 p_limit1 = rdev->pm.dpm.tdp_limit;
+	u32 p_limit2 = rdev->pm.dpm.near_tdp_limit;
+	u32 k = dte_data->k;
+	u32 t_max = dte_data->max_t;
+	u32 t_split[5] = { 10, 15, 20, 25, 30 };
+	u32 t_0 = dte_data->t0;
+	u32 i;
+
+	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+		dte_data->tdep_count = 3;
+
+		for (i = 0; i < k; i++) {
+			dte_data->r[i] =
+				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
+				(p_limit2  * (u32)100);
+		}
+
+		dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+			dte_data->tdep_r[i] = dte_data->r[4];
+		}
+	} else {
+		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+	}
+}
+
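+/*
+ * Pick the CAC weight, local CAC, CAC override, PowerTune and DTE
+ * tables for the detected ASIC; Tahiti, Pitcairn, Verde and Oland
+ * further key the selection on the PCI device ID.  Unknown SI
+ * revisions leave PowerTune uninitialized and bail out.
+ */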
+static void si_initialize_powertune_defaults(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	bool update_dte_from_pl2 = false;
+
+	if (rdev->family == CHIP_TAHITI) {
+		si_pi->cac_weights = cac_weights_tahiti;
+		si_pi->lcac_config = lcac_tahiti;
+		si_pi->cac_override = cac_override_tahiti;
+		si_pi->powertune_data = &powertune_data_tahiti;
+		si_pi->dte_data = dte_data_tahiti;
+
+		switch (rdev->pdev->device) {
+		case 0x6798:
+			si_pi->dte_data.enable_dte_by_default = true;
+			break;
+		case 0x6799:
+			si_pi->dte_data = dte_data_new_zealand;
+			break;
+		case 0x6790:
+		case 0x6791:
+		case 0x6792:
+		case 0x679E:
+			si_pi->dte_data = dte_data_aruba_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679B:
+			si_pi->dte_data = dte_data_malta;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679A:
+			si_pi->dte_data = dte_data_tahiti_pro;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			if (si_pi->dte_data.enable_dte_by_default == true)
+				DRM_ERROR("DTE is not enabled!\n");
+			break;
+		}
+	} else if (rdev->family == CHIP_PITCAIRN) {
+		switch (rdev->pdev->device) {
+		case 0x6810:
+		case 0x6818:
+			si_pi->cac_weights = cac_weights_pitcairn;
+			si_pi->lcac_config = lcac_pitcairn;
+			si_pi->cac_override = cac_override_pitcairn;
+			si_pi->powertune_data = &powertune_data_pitcairn;
+			si_pi->dte_data = dte_data_curacao_xt;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6819:
+		case 0x6811:
+			si_pi->cac_weights = cac_weights_pitcairn;
+			si_pi->lcac_config = lcac_pitcairn;
+			si_pi->cac_override = cac_override_pitcairn;
+			si_pi->powertune_data = &powertune_data_pitcairn;
+			si_pi->dte_data = dte_data_curacao_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6800:
+		case 0x6806:
+			si_pi->cac_weights = cac_weights_pitcairn;
+			si_pi->lcac_config = lcac_pitcairn;
+			si_pi->cac_override = cac_override_pitcairn;
+			si_pi->powertune_data = &powertune_data_pitcairn;
+			si_pi->dte_data = dte_data_neptune_xt;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_pitcairn;
+			si_pi->lcac_config = lcac_pitcairn;
+			si_pi->cac_override = cac_override_pitcairn;
+			si_pi->powertune_data = &powertune_data_pitcairn;
+			si_pi->dte_data = dte_data_pitcairn;
+			break;
+		}
+	} else if (rdev->family == CHIP_VERDE) {
+		si_pi->lcac_config = lcac_cape_verde;
+		si_pi->cac_override = cac_override_cape_verde;
+		si_pi->powertune_data = &powertune_data_cape_verde;
+
+		switch (rdev->pdev->device) {
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+		case 0x6835:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682C:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_sun_xt;
+			break;
+		case 0x6825:
+		case 0x6827:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6824:
+		case 0x682D:
+			si_pi->cac_weights = cac_weights_chelsea_xt;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682F:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6820:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xtx;
+			break;
+		case 0x6821:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xt;
+			break;
+		case 0x6823:
+		case 0x682B:
+		case 0x6822:
+		case 0x682A:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_venus_pro;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_cape_verde;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		}
+	} else if (rdev->family == CHIP_OLAND) {
+		switch (rdev->pdev->device) {
+		case 0x6601:
+		case 0x6621:
+		case 0x6603:
+		case 0x6605:
+			si_pi->cac_weights = cac_weights_mars_pro;
+			si_pi->lcac_config = lcac_mars_pro;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_mars_pro;
+			si_pi->dte_data = dte_data_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6600:
+		case 0x6606:
+		case 0x6620:
+		case 0x6604:
+			si_pi->cac_weights = cac_weights_mars_xt;
+			si_pi->lcac_config = lcac_mars_pro;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_mars_pro;
+			si_pi->dte_data = dte_data_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6611:
+		case 0x6613:
+		case 0x6608:
+			si_pi->cac_weights = cac_weights_oland_pro;
+			si_pi->lcac_config = lcac_mars_pro;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_mars_pro;
+			si_pi->dte_data = dte_data_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6610:
+			si_pi->cac_weights = cac_weights_oland_xt;
+			si_pi->lcac_config = lcac_mars_pro;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_mars_pro;
+			si_pi->dte_data = dte_data_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_oland;
+			si_pi->lcac_config = lcac_oland;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_oland;
+			si_pi->dte_data = dte_data_oland;
+			break;
+		}
+	} else if (rdev->family == CHIP_HAINAN) {
+		si_pi->cac_weights = cac_weights_hainan;
+		si_pi->lcac_config = lcac_oland;
+		si_pi->cac_override = cac_override_oland;
+		si_pi->powertune_data = &powertune_data_hainan;
+		si_pi->dte_data = dte_data_sun_xt;
+		update_dte_from_pl2 = true;
+	} else {
+		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+		return;
+	}
+
+	ni_pi->enable_power_containment = false;
+	ni_pi->enable_cac = false;
+	ni_pi->enable_sq_ramping = false;
+	si_pi->enable_dte = false;
+
+	if (si_pi->powertune_data->enable_powertune_by_default) {
+		ni_pi->enable_power_containment = true;
+		ni_pi->enable_cac = true;
+		if (si_pi->dte_data.enable_dte_by_default) {
+			si_pi->enable_dte = true;
+			if (update_dte_from_pl2)
+				si_update_dte_from_pl2(rdev, &si_pi->dte_data);
+		}
+		ni_pi->enable_sq_ramping = true;
+	}
+
+	ni_pi->driver_calculate_cac_leakage = true;
+	ni_pi->cac_configuration_required = true;
+
+	if (ni_pi->cac_configuration_required) {
+		ni_pi->support_cac_long_term_average = true;
+		si_pi->dyn_powertune_data.l2_lta_window_size =
+			si_pi->powertune_data->l2_lta_window_size_default;
+		si_pi->dyn_powertune_data.lts_truncate =
+			si_pi->powertune_data->lts_truncate_default;
+	} else {
+		ni_pi->support_cac_long_term_average = false;
+		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+		si_pi->dyn_powertune_data.lts_truncate = 0;
+	}
+
+	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
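+/*
+ * Power scaling between driver and SMC units is effectively a no-op
+ * on SI: the scaling factor is 1 and si_scale_power_for_smc() returns
+ * its input unchanged.
+ */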
+static u32 si_get_smc_power_scaling_factor(struct radeon_device *rdev)
+{
+	return 1;
+}
+
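+/*
+ * CAC window time: the CG_CAC_CTRL window field packs two 16-bit
+ * factors whose product is the window length in xclk cycles; with
+ * xclk in 10 kHz units, size * 100 / xclk yields the window in
+ * microseconds.
+ */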
+static u32 si_calculate_cac_wintime(struct radeon_device *rdev)
+{
+	u32 xclk;
+	u32 wintime;
+	u32 cac_window;
+	u32 cac_window_size;
+
+	xclk = radeon_get_xclk(rdev);
+
+	if (xclk == 0)
+		return 0;
+
+	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+	wintime = (cac_window_size * 100) / xclk;
+
+	return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+	return power_in_watts;
+}
+
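+/*
+ * Apply the user TDP override (tdp_adjustment, in percent) in the
+ * requested direction and shift the near-TDP limit by the same delta.
+ * Both results are validated: the TDP limit must stay in
+ * (0, 2 * stock limit] and the near-TDP limit in (0, TDP limit].
+ */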
+static int si_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
+					    bool adjust_polarity,
+					    u32 tdp_adjustment,
+					    u32 *tdp_limit,
+					    u32 *near_tdp_limit)
+{
+	u32 adjustment_delta, max_tdp_limit;
+
+	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
+		return -EINVAL;
+
+	max_tdp_limit = ((100 + 100) * rdev->pm.dpm.tdp_limit) / 100;
+
+	if (adjust_polarity) {
+		*tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
+		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - rdev->pm.dpm.tdp_limit);
+	} else {
+		*tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
+		adjustment_delta  = rdev->pm.dpm.tdp_limit - *tdp_limit;
+		if (adjustment_delta < rdev->pm.dpm.near_tdp_limit_adjusted)
+			*near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+		else
+			*near_tdp_limit = 0;
+	}
+
+	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
+		return -EINVAL;
+	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_populate_smc_tdp_limits(struct radeon_device *rdev,
+				      struct radeon_ps *radeon_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		PP_SIslands_PAPMParameters *papm_parm;
+		struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
+		u32 tdp_limit;
+		u32 near_tdp_limit;
+		int ret;
+
+		if (scaling_factor == 0)
+			return -EINVAL;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		ret = si_calculate_adjusted_tdp_limits(rdev,
+						       false, /* ??? */
+						       rdev->pm.dpm.tdp_adjustment,
+						       &tdp_limit,
+						       &near_tdp_limit);
+		if (ret)
+			return ret;
+
+		smc_table->dpm2Params.TDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = si_copy_bytes_to_smc(rdev,
+					   (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+						 offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+					   (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+					   sizeof(u32) * 3,
+					   si_pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (si_pi->enable_ppm) {
+			papm_parm = &si_pi->papm_parm;
+			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+			papm_parm->PlatformPowerLimit = 0xffffffff;
+			papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+			ret = si_copy_bytes_to_smc(rdev, si_pi->papm_cfg_table_start,
+						   (u8 *)papm_parm,
+						   sizeof(PP_SIslands_PAPMParameters),
+						   si_pi->sram_end);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct radeon_device *rdev,
+					struct radeon_ps *radeon_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
+		int ret;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(rdev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((rdev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = si_copy_bytes_to_smc(rdev,
+					   (si_pi->state_table_start +
+					    offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+					    offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+					   (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+					   sizeof(u32) * 2,
+					   si_pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
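+/*
+ * Efficiency ratio between two levels from the V^2 dynamic-power
+ * relationship:
+ *
+ *   ratio = 1024 * (curr_vddc / prev_vddc)^2 * (1000 + margin) / 1000
+ *
+ * Returns 0 if either voltage is zero or the ratio overflows 16 bits.
+ */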
+static u16 si_calculate_power_efficiency_ratio(struct radeon_device *rdev,
+					       const u16 prev_std_vddc,
+					       const u16 curr_std_vddc)
+{
+	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+	u64 prev_vddc = (u64)prev_std_vddc;
+	u64 curr_vddc = (u64)curr_std_vddc;
+	u64 pwr_efficiency_ratio, n, d;
+
+	if ((prev_vddc == 0) || (curr_vddc == 0))
+		return 0;
+
+	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+	d = prev_vddc * prev_vddc;
+	pwr_efficiency_ratio = div64_u64(n, d);
+
+	if (pwr_efficiency_ratio > (u64)0xFFFF)
+		return 0;
+
+	return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct radeon_device *rdev,
+					    struct radeon_ps *radeon_state)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+	    radeon_state->vclk && radeon_state->dclk)
+		return true;
+
+	return false;
+}
+
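+/*
+ * Fill the per-level DPM2 power-containment parameters.  Level 0 is
+ * left unthrottled; for each higher level, MaxPS encodes the allowed
+ * pulse skipping as a fraction of the sclk step down to min_sclk, and
+ * PwrEfficiencyRatio relates this level's standard VDDC to the
+ * previous level's.
+ */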
+static int si_populate_power_containment_values(struct radeon_device *rdev,
+						struct radeon_ps *radeon_state,
+						SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	SISLANDS_SMC_VOLTAGE_VALUE vddc;
+	u32 prev_sclk;
+	u32 max_sclk;
+	u32 min_sclk;
+	u16 prev_std_vddc;
+	u16 curr_std_vddc;
+	int i;
+	u16 pwr_efficiency_ratio;
+	u8 max_ps_percent;
+	bool disable_uvd_power_tune;
+	int ret;
+
+	if (ni_pi->enable_power_containment == false)
+		return 0;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	disable_uvd_power_tune = si_should_disable_uvd_powertune(rdev, radeon_state);
+
+	smc_state->levels[0].dpm2.MaxPS = 0;
+	smc_state->levels[0].dpm2.NearTDPDec = 0;
+	smc_state->levels[0].dpm2.AboveSafeInc = 0;
+	smc_state->levels[0].dpm2.BelowSafeInc = 0;
+	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	for (i = 1; i < state->performance_level_count; i++) {
+		prev_sclk = state->performance_levels[i-1].sclk;
+		max_sclk  = state->performance_levels[i].sclk;
+		if (i == 1)
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+		else
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+		if (prev_sclk > max_sclk)
+			return -EINVAL;
+
+		if ((max_ps_percent == 0) ||
+		    (prev_sclk == max_sclk) ||
+		    disable_uvd_power_tune) {
+			min_sclk = max_sclk;
+		} else if (i == 1) {
+			min_sclk = prev_sclk;
+		} else {
+			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+		}
+
+		if (min_sclk < state->performance_levels[0].sclk)
+			min_sclk = state->performance_levels[0].sclk;
+
+		if (min_sclk == 0)
+			return -EINVAL;
+
+		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i-1].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(rdev, &vddc, &prev_std_vddc);
+		if (ret)
+			return ret;
+
+		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(rdev, &vddc, &curr_std_vddc);
+		if (ret)
+			return ret;
+
+		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(rdev,
+									   prev_std_vddc, curr_std_vddc);
+
+		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+	}
+
+	return 0;
+}
+
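+/*
+ * Program per-level SQ power ramping.  Ramping is applied only at or
+ * above the sq_ramping_threshold sclk, and only if every
+ * SISLANDS_DPM2_SQ_RAMP_* constant fits its register field; otherwise
+ * the throttle fields are saturated (all mask bits set).
+ */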
+static int si_populate_sq_ramping_values(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	u32 sq_power_throttle, sq_power_throttle2;
+	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+	int i;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	if (rdev->pm.dpm.sq_ramping_threshold == 0)
+		return -EINVAL;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+		enable_sq_ramping = false;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		sq_power_throttle = 0;
+		sq_power_throttle2 = 0;
+
+		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
+		    enable_sq_ramping) {
+			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+		} else {
+			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+		}
+
+		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+	}
+
+	return 0;
+}
+
+static int si_enable_power_containment(struct radeon_device *rdev,
+				       struct radeon_ps *radeon_new_state,
+				       bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_power_containment) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
+				smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->pc_enabled = false;
+				} else {
+					ni_pi->pc_enabled = true;
+				}
+			}
+		} else {
+			smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
+			if (smc_result != PPSMC_Result_OK)
+				ret = -EINVAL;
+			ni_pi->pc_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
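+/*
+ * Convert si_dte_data to the big-endian
+ * Smc_SIslands_DTE_Configuration and upload it to SMC SRAM.  The
+ * filter-stage and temperature-dependent arrays are clamped to the
+ * SMC limits, and the last filter stage is dropped when a
+ * temperature-dependent table is present.
+ */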
+static int si_initialize_smc_dte_tables(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int ret = 0;
+	struct si_dte_data *dte_data = &si_pi->dte_data;
+	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+	u32 table_size;
+	u8 tdep_count;
+	u32 i;
+
+	if (dte_data == NULL)
+		si_pi->enable_dte = false;
+
+	if (si_pi->enable_dte == false)
+		return 0;
+
+	if (dte_data->k <= 0)
+		return -EINVAL;
+
+	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+	if (dte_tables == NULL) {
+		si_pi->enable_dte = false;
+		return -ENOMEM;
+	}
+
+	table_size = dte_data->k;
+
+	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+	tdep_count = dte_data->tdep_count;
+	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+	dte_tables->K = cpu_to_be32(table_size);
+	dte_tables->T0 = cpu_to_be32(dte_data->t0);
+	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+	dte_tables->WindowSize = dte_data->window_size;
+	dte_tables->temp_select = dte_data->temp_select;
+	dte_tables->DTE_mode = dte_data->dte_mode;
+	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+	if (tdep_count > 0)
+		table_size--;
+
+	for (i = 0; i < table_size; i++) {
+		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+		dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
+	}
+
+	dte_tables->Tdep_count = tdep_count;
+
+	for (i = 0; i < (u32)tdep_count; i++) {
+		dte_tables->T_limits[i] = dte_data->t_limits[i];
+		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+	}
+
+	ret = si_copy_bytes_to_smc(rdev, si_pi->dte_table_start, (u8 *)dte_tables,
+				   sizeof(Smc_SIslands_DTE_Configuration), si_pi->sram_end);
+	kfree(dte_tables);
+
+	return ret;
+}
+
+static int si_get_cac_std_voltage_max_min(struct radeon_device *rdev,
+					  u16 *max, u16 *min)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct radeon_cac_leakage_table *table =
+		&rdev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+	u32 v0_loadline;
+
+	if (table == NULL)
+		return -EINVAL;
+
+	*max = 0;
+	*min = 0xFFFF;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].vddc > *max)
+			*max = table->entries[i].vddc;
+		if (table->entries[i].vddc < *min)
+			*min = table->entries[i].vddc;
+	}
+
+	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+		return -EINVAL;
+
+	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+	if (v0_loadline > 0xFFFFUL)
+		return -EINVAL;
+
+	*min = (u16)v0_loadline;
+
+	if ((*min > *max) || (*max == 0) || (*min == 0))
+		return -EINVAL;
+
+	return 0;
+}
+
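+/* Voltage step covering [min, max] across the LUT, rounded up. */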
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
+static int si_init_dte_leakage_table(struct radeon_device *rdev,
+				     PP_SIslands_CacConfig *cac_tables,
+				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
+				     u16 t0, u16 t_step)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 leakage;
+	unsigned int i, j;
+	s32 t;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(rdev);
+
+	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+		t = (1000 * (i * t_step + t0));
+
+		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+			voltage = vddc_max - (vddc_step * j);
+
+			si_calculate_leakage_for_v_and_t(rdev,
+							 &si_pi->powertune_data->leakage_coefficients,
+							 voltage,
+							 t,
+							 si_pi->dyn_powertune_data.cac_leakage,
+							 &leakage);
+
+			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+			if (smc_leakage > 0xFFFF)
+				smc_leakage = 0xFFFF;
+
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+		}
+	}
+	return 0;
+}
+
+static int si_init_simplified_leakage_table(struct radeon_device *rdev,
+					    PP_SIslands_CacConfig *cac_tables,
+					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 leakage;
+	unsigned int i, j;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(rdev);
+
+	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+		voltage = vddc_max - (vddc_step * j);
+
+		si_calculate_leakage_for_v(rdev,
+					   &si_pi->powertune_data->leakage_coefficients,
+					   si_pi->powertune_data->fixed_kt,
+					   voltage,
+					   si_pi->dyn_powertune_data.cac_leakage,
+					   &leakage);
+
+		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+		if (smc_leakage > 0xFFFF)
+			smc_leakage = 0xFFFF;
+
+		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+	}
+	return 0;
+}
+
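+/*
+ * Build and upload the SMC CAC configuration: program the CAC
+ * sampling window, generate the leakage LUT (the full V/T table when
+ * DTE or driver-side leakage calculation is in use, the simplified
+ * fixed-kt variant otherwise) and fill in the load-line and window
+ * parameters.
+ */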
+static int si_initialize_smc_cac_tables(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	PP_SIslands_CacConfig *cac_tables = NULL;
+	u16 vddc_max, vddc_min, vddc_step;
+	u16 t0, t_step;
+	u32 load_line_slope, reg;
+	int ret = 0;
+	u32 ticks_per_us = radeon_get_xclk(rdev) / 100;
+
+	if (ni_pi->enable_cac == false)
+		return 0;
+
+	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+	if (!cac_tables)
+		return -ENOMEM;
+
+	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+	WREG32(CG_CAC_CTRL, reg);
+
+	si_pi->dyn_powertune_data.cac_leakage = rdev->pm.dpm.cac_leakage;
+	si_pi->dyn_powertune_data.dc_pwr_value =
+		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(rdev);
+	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+	ret = si_get_cac_std_voltage_max_min(rdev, &vddc_max, &vddc_min);
+	if (ret)
+		goto done_free;
+
+	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
+	t_step = 4;
+	t0 = 60;
+
+	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+		ret = si_init_dte_leakage_table(rdev, cac_tables,
+						vddc_max, vddc_min, vddc_step,
+						t0, t_step);
+	else
+		ret = si_init_simplified_leakage_table(rdev, cac_tables,
+						       vddc_max, vddc_min, vddc_step);
+	if (ret)
+		goto done_free;
+
+	load_line_slope = ((u32)rdev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+	cac_tables->R_LL = cpu_to_be32(load_line_slope);
+	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+	cac_tables->calculation_repeats = cpu_to_be32(2);
+	cac_tables->dc_cac = cpu_to_be32(0);
+	cac_tables->log2_PG_LKG_SCALE = 12;
+	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+	ret = si_copy_bytes_to_smc(rdev, si_pi->cac_table_start, (u8 *)cac_tables,
+				   sizeof(PP_SIslands_CacConfig), si_pi->sram_end);
+
+	if (ret)
+		goto done_free;
+
+	ret = si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+	if (ret) {
+		ni_pi->enable_cac = false;
+		ni_pi->enable_power_containment = false;
+	}
+
+	kfree(cac_tables);
+
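+	/* any failure above has already disabled CAC and power containment */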
+	return 0;
+}
+
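+/*
+ * Apply a read-modify-write list of CAC register settings, terminated
+ * by an offset of 0xFFFFFFFF.  SISLANDS_CACCONFIG_CGIND entries
+ * address the SMC clock-gating indirect space; everything else is a
+ * plain MMIO register with the offset given in dwords.
+ */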
+static int si_program_cac_config_registers(struct radeon_device *rdev,
+					   const struct si_cac_config_reg *cac_config_regs)
+{
+	const struct si_cac_config_reg *config_regs = cac_config_regs;
+	u32 data = 0, offset;
+
+	if (!config_regs)
+		return -EINVAL;
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				data = RREG32_SMC(offset);
+			break;
+		default:
+			data = RREG32(config_regs->offset << 2);
+			break;
+		}
+
+		data &= ~config_regs->mask;
+		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				WREG32_SMC(offset, data);
+			break;
+		default:
+			WREG32(config_regs->offset << 2, data);
+			break;
+		}
+		config_regs++;
+	}
+	return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int ret;
+
+	if ((ni_pi->enable_cac == false) ||
+	    (ni_pi->cac_configuration_required == false))
+		return 0;
+
+	ret = si_program_cac_config_registers(rdev, si_pi->lcac_config);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(rdev, si_pi->cac_override);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(rdev, si_pi->cac_weights);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int si_enable_smc_cac(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_new_state,
+			     bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_cac) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
+				if (ni_pi->support_cac_long_term_average) {
+					smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
+					if (smc_result != PPSMC_Result_OK)
+						ni_pi->support_cac_long_term_average = false;
+				}
+
+				smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->cac_enabled = false;
+				} else {
+					ni_pi->cac_enabled = true;
+				}
+
+				if (si_pi->enable_dte) {
+					smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
+					if (smc_result != PPSMC_Result_OK)
+						ret = -EINVAL;
+				}
+			}
+		} else if (ni_pi->cac_enabled) {
+			if (si_pi->enable_dte)
+				smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
+
+			smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+
+			ni_pi->cac_enabled = false;
+
+			if (ni_pi->support_cac_long_term_average)
+				smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
+		}
+	}
+	return ret;
+}
+
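+/*
+ * Precompute the 256-entry SPLL divider table the SMC consults for
+ * engine-clock changes, one entry per 512 sclk units (10 kHz units,
+ * so 5.12 MHz steps, presumably), packing the post/feedback dividers
+ * and spread-spectrum parameters into big-endian words.
+ */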
+static int si_init_smc_spll_table(struct radeon_device *rdev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+	SISLANDS_SMC_SCLK_VALUE sclk_params;
+	u32 fb_div, p_div;
+	u32 clk_s, clk_v;
+	u32 sclk = 0;
+	int ret = 0;
+	u32 tmp;
+	int i;
+
+	if (si_pi->spll_table_start == 0)
+		return -EINVAL;
+
+	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+	if (spll_table == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < 256; i++) {
+		ret = si_calculate_sclk_params(rdev, sclk, &sclk_params);
+		if (ret)
+			break;
+
+		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+		fb_div &= ~0x00001FFF;
+		fb_div >>= 1;
+		clk_v >>= 6;
+
+		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+			ret = -EINVAL;
+		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+			ret = -EINVAL;
+		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+			ret = -EINVAL;
+		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+			ret = -EINVAL;
+
+		if (ret)
+			break;
+
+		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+		spll_table->freq[i] = cpu_to_be32(tmp);
+
+		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+		spll_table->ss[i] = cpu_to_be32(tmp);
+
+		sclk += 512;
+	}
+
+	if (!ret)
+		ret = si_copy_bytes_to_smc(rdev, si_pi->spll_table_start,
+					   (u8 *)spll_table, sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+					   si_pi->sram_end);
+
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	kfree(spll_table);
+
+	return ret;
+}
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct radeon_device *rdev,
+						   u16 vce_voltage)
+{
+	u16 highest_leakage = 0;
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int i;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+	}
+
+	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+		return highest_leakage;
+
+	return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct radeon_device *rdev,
+				    u32 evclk, u32 ecclk, u16 *voltage)
+{
+	u32 i;
+	int ret = -EINVAL;
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (((evclk == 0) && (ecclk == 0)) ||
+	    (table && (table->count == 0))) {
+		*voltage = 0;
+		return 0;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if ((evclk <= table->entries[i].evclk) &&
+		    (ecclk <= table->entries[i].ecclk)) {
+			*voltage = table->entries[i].v;
+			ret = 0;
+			break;
+		}
+	}
+
+	/* if no match return the highest voltage */
+	if (ret)
+		*voltage = table->entries[table->count - 1].v;
+
+	*voltage = si_get_lower_of_leakage_and_vce_voltage(rdev, *voltage);
+
+	return ret;
+}
+
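+/*
+ * Clamp and massage a requested power state before it is programmed:
+ * apply the per-board sclk/mclk caps, the AC/DC limits, the voltage
+ * dependency tables and the VCE minimums; force levels to be
+ * monotonically non-decreasing; and pin mclk/sclk to the highest
+ * level when switching must be avoided (multiple active CRTCs, a
+ * too-short vblank, or UVD playback).
+ */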
+static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+					struct radeon_ps *rps)
+{
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct radeon_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching = false;
+	bool disable_sclk_switching = false;
+	u32 mclk, sclk;
+	u16 vddc, vddci, min_vce_voltage = 0;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+	u32 max_sclk = 0, max_mclk = 0;
+	int i;
+
+	if (rdev->family == CHIP_HAINAN) {
+		if ((rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0xC3) ||
+		    (rdev->pdev->device == 0x6664) ||
+		    (rdev->pdev->device == 0x6665) ||
+		    (rdev->pdev->device == 0x6667)) {
+			max_sclk = 75000;
+		}
+		if ((rdev->pdev->revision == 0xC3) ||
+		    (rdev->pdev->device == 0x6665)) {
+			max_sclk = 60000;
+			max_mclk = 80000;
+		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->revision == 0xC7) ||
+		    (rdev->pdev->revision == 0x80) ||
+		    (rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0x87) ||
+		    (rdev->pdev->device == 0x6604) ||
+		    (rdev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
+	}
+
+	if (rps->vce_active) {
+		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+		si_get_vce_clock_voltage(rdev, rps->evclk, rps->ecclk,
+					 &min_vce_voltage);
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
+	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+	    ni_dpm_vblank_too_short(rdev))
+		disable_mclk_switching = true;
+
+	if (rps->vclk || rps->dclk) {
+		disable_mclk_switching = true;
+		disable_sclk_switching = true;
+	}
+
+	if (rdev->pm.dpm.ac_power)
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	for (i = ps->performance_level_count - 2; i >= 0; i--) {
+		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+	}
+	if (rdev->pm.dpm.ac_power == false) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+			if (ps->performance_levels[i].vddc > max_limits->vddc)
+				ps->performance_levels[i].vddc = max_limits->vddc;
+			if (ps->performance_levels[i].vddci > max_limits->vddci)
+				ps->performance_levels[i].vddci = max_limits->vddci;
+		}
+	}
+
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+		if (max_mclk) {
+			if (ps->performance_levels[i].mclk > max_mclk)
+				ps->performance_levels[i].mclk = max_mclk;
+		}
+		if (max_sclk) {
+			if (ps->performance_levels[i].sclk > max_sclk)
+				ps->performance_levels[i].sclk = max_sclk;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	if (disable_mclk_switching) {
+		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+	} else {
+		mclk = ps->performance_levels[0].mclk;
+		vddci = ps->performance_levels[0].vddci;
+	}
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+	} else {
+		sclk = ps->performance_levels[0].sclk;
+		vddc = ps->performance_levels[0].vddc;
+	}
+
+	if (rps->vce_active) {
+		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
+			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
+	}
+
+	/* adjusted low state */
+	ps->performance_levels[0].sclk = sclk;
+	ps->performance_levels[0].mclk = mclk;
+	ps->performance_levels[0].vddc = vddc;
+	ps->performance_levels[0].vddci = vddci;
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[0].sclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (sclk < ps->performance_levels[i].sclk)
+				sclk = ps->performance_levels[i].sclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].sclk = sclk;
+			ps->performance_levels[i].vddc = vddc;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+		}
+	}
+
+	if (disable_mclk_switching) {
+		mclk = ps->performance_levels[0].mclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (mclk < ps->performance_levels[i].mclk)
+				mclk = ps->performance_levels[i].mclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].mclk = mclk;
+			ps->performance_levels[i].vddci = vddci;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+		}
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(rdev, max_limits,
+					      &ps->performance_levels[i]);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc < min_vce_voltage)
+			ps->performance_levels[i].vddc = min_vce_voltage;
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+						   ps->performance_levels[i].sclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddci, &ps->performance_levels[i].vddci);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+						   rdev->clock.current_dispclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		btc_apply_voltage_delta_rules(rdev,
+					      max_limits->vddc, max_limits->vddci,
+					      &ps->performance_levels[i].vddc,
+					      &ps->performance_levels[i].vddci);
+	}
+
+	ps->dc_compatible = true;
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+			ps->dc_compatible = false;
+	}
+}
+
+#if 0
+static int si_read_smc_soft_register(struct radeon_device *rdev,
+				     u16 reg_offset, u32 *value)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	return si_read_smc_sram_dword(rdev,
+				      si_pi->soft_regs_start + reg_offset, value,
+				      si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct radeon_device *rdev,
+				      u16 reg_offset, u32 value)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	return si_write_smc_sram_dword(rdev,
+				       si_pi->soft_regs_start + reg_offset,
+				       value, si_pi->sram_end);
+}
+
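+/*
+ * Detect one specific 1GB GDDR5 board: device 0x6819 with a
+ * particular memory vendor/revision, where the computed density value
+ * (2^(rows + columns - 20 + banks) * bus width) of 0x400 identifies
+ * the 1 GB configuration.
+ */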
+static bool si_is_special_1gb_platform(struct radeon_device *rdev)
+{
+	bool ret = false;
+	u32 tmp, width, row, column, bank, density;
+	bool is_memory_gddr5, is_special;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
+		&& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+	tmp = RREG32(MC_ARB_RAMCFG);
+	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+	density = (1 << (row + column - 20 + bank)) * width;
+
+	if ((rdev->pdev->device == 0x6819) &&
+	    is_memory_gddr5 && is_special && (density == 0x400))
+		ret = true;
+
+	return ret;
+}
+
+static void si_get_leakage_vddc(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u16 vddc, count = 0;
+	int i, ret;
+
+	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+		ret = radeon_atom_get_leakage_vddc_based_on_leakage_idx(rdev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+			si_pi->leakage_voltage.entries[count].voltage = vddc;
+			si_pi->leakage_voltage.entries[count].leakage_index =
+				SISLANDS_LEAKAGE_INDEX0 + i;
+			count++;
+		}
+	}
+	si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct radeon_device *rdev,
+						     u32 index, u16 *leakage_voltage)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int i;
+
+	if (leakage_voltage == NULL)
+		return -EINVAL;
+
+	if ((index & 0xff00) != 0xff00)
+		return -EINVAL;
+
+	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+		return -EINVAL;
+
+	if (index < SISLANDS_LEAKAGE_INDEX0)
+		return -EINVAL;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+			return 0;
+		}
+	}
+	return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	bool want_thermal_protection;
+	enum radeon_dpm_event_src dpm_event_src;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+		if (pi->thermal_protection)
+			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	} else {
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+	}
+}
+
+static void si_enable_auto_throttle_source(struct radeon_device *rdev,
+					   enum radeon_dpm_auto_throttle_src source,
+					   bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+static void si_start_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct radeon_device *rdev,
+					       u32 thermal_level)
+{
+	PPSMC_Result ret;
+
+	if (thermal_level == 0) {
+		ret = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+		if (ret == PPSMC_Result_OK)
+			return 0;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct radeon_device *rdev)
+{
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct radeon_device *rdev, bool ac_power)
+{
+	if (ac_power)
+		return (si_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_SCRATCH0, parameter);
+	return si_send_msg_to_smc(rdev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct radeon_device *rdev)
+{
+	if (si_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+int si_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level)
+{
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	u32 levels = ps->performance_level_count;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct radeon_device *rdev)
+{
+	return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct radeon_device *rdev)
+{
+	return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_halt_smc(struct radeon_device *rdev)
+{
+	if (si_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (si_wait_for_smc_inactive(rdev) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_resume_smc(struct radeon_device *rdev)
+{
+	if (si_send_msg_to_smc(rdev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (si_send_msg_to_smc(rdev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct radeon_device *rdev)
+{
+	si_program_jump_on_start(rdev);
+	si_start_smc(rdev);
+	si_start_smc_clock(rdev);
+}
+
+static void si_dpm_stop_smc(struct radeon_device *rdev)
+{
+	si_reset_smc(rdev);
+	si_stop_smc_clock(rdev);
+}
+
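+/* The firmware header at SISLANDS_SMC_FIRMWARE_HEADER_LOCATION holds the
+ * SMC SRAM offsets of the driver-visible tables; cache them once so later
+ * table uploads know where to write.
+ */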
+static int si_process_firmware_header(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->state_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->soft_regs_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->mc_reg_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->fan_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->arb_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->cac_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->dte_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->spll_table_start = tmp;
+
+	ret = si_read_smc_sram_dword(rdev,
+				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+				     SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->papm_cfg_table_start = tmp;
+
+	return ret;
+}
+
+static void si_read_clock_registers(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct radeon_device *rdev,
+					  bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	else
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct radeon_device *rdev)
+{
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	udelay(25000);
+
+	return 0;
+}
+
+static int si_exit_ulp_state(struct radeon_device *rdev)
+{
+	int i;
+
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	udelay(7000);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SMC_RESP_0) == 1)
+			break;
+		udelay(1000);
+	}
+
+	return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct radeon_device *rdev,
+					bool has_display)
+{
+	PPSMC_Msg msg = has_display ?
+		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+	return (si_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
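+/* Convert the response/timeout values into reference clock ticks for the
+ * SMC.  radeon_get_xclk() reports the clock in 10 kHz units, so
+ * (time * xclk) / 100 works out to ticks if the times are in microseconds,
+ * which the defaults below suggest.
+ */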
+static void si_program_response_times(struct radeon_device *rdev)
+{
+	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
+	u32 vddc_dly, acpi_dly, vbi_dly;
+	u32 reference_clock;
+
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
+
+	if (voltage_response_time == 0)
+		voltage_response_time = 1000;
+
+	acpi_delay_time = 15000;
+	vbi_time_out = 100000;
+
+	reference_clock = radeon_get_xclk(rdev);
+
+	vddc_dly = (voltage_response_time * reference_clock) / 100;
+	acpi_dly = (acpi_delay_time * reference_clock) / 100;
+	vbi_dly  = (vbi_time_out * reference_clock) / 100;
+
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	u32 tmp = 1; /* XXX: 0x10 on tahiti A0 */
+
+	if (eg_pi->sclk_deep_sleep) {
+		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+			 ~AUTOSCALE_ON_SS_CLEAR);
+	}
+}
+
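+/* Program when reclocking is allowed relative to display activity: with
+ * active crtcs the gaps open at vblank or above the watermark, and the
+ * DISP1 slow select is rerouted to the first active crtc so the gap logic
+ * tracks a pipe that is actually scanning out.
+ */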
+static void si_program_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp, pipe;
+	int i;
+
+	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	if (rdev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	if (rdev->pm.dpm.new_active_crtc_count > 1)
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+	if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
+	    (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+		/* find the first active crtc */
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.dpm.new_active_crtcs & (1 << i))
+				break;
+		}
+		if (i == rdev->num_crtc)
+			pipe = 0;
+		else
+			pipe = i;
+
+		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+	}
+
+	/* Setting this to false forces the performance state to low if the crtcs are disabled.
+	 * This can be a problem on PowerXpress systems or if you want to use the card
+	 * for offscreen rendering or compute if there are no crtcs enabled.
+	 */
+	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	if (enable) {
+		if (pi->sclk_ss)
+			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+	} else {
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+	}
+}
+
+static void si_setup_bsp(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(pi->asi,
+			       xclk,
+			       16,
+			       &pi->bsp,
+			       &pi->bsu);
+
+	r600_calculate_u_and_p(pi->pasi,
+			       xclk,
+			       16,
+			       &pi->pbsp,
+			       &pi->pbsu);
+
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+	WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct radeon_device *rdev)
+{
+	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct radeon_device *rdev)
+{
+	int i;
+	enum r600_td td = R600_TD_DFLT;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct radeon_device *rdev)
+{
+	WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct radeon_device *rdev)
+{
+	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct radeon_device *rdev)
+{
+	WREG32(CG_FTV, 0);
+}
+
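+/* memory_clock is in 10 kHz units (10000 == 100 MHz).  Map it onto the
+ * 4-bit ratio index: 0 below 100 MHz, 0x0f from 800 MHz up, one step per
+ * 50 MHz in between.
+ */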
+u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+	u8 mc_para_index;
+
+	if (memory_clock < 10000)
+		mc_para_index = 0;
+	else if (memory_clock >= 80000)
+		mc_para_index = 0x0f;
+	else
+		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+	return mc_para_index;
+}
+
+u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+	u8 mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500)
+			mc_para_index = 0x00;
+		else if (memory_clock > 47500)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 10000) / 2500);
+	} else {
+		if (memory_clock < 65000)
+			mc_para_index = 0x00;
+		else if (memory_clock > 135000)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 60000) / 5000);
+	}
+	return mc_para_index;
+}
+
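+/* Strobe mode is used at or below the threshold; the ratio index is OR'd
+ * with SISLANDS_SMC_STROBE_ENABLE so the SMC gets both the ratio and the
+ * strobe flag in one byte.
+ */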
+static u8 si_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	bool strobe_mode = false;
+	u8 result = 0;
+
+	if (mclk <= pi->mclk_strobe_mode_threshold)
+		strobe_mode = true;
+
+	if (pi->mem_gddr5)
+		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+	else
+		result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+	if (strobe_mode)
+		result |= SISLANDS_SMC_STROBE_ENABLE;
+
+	return result;
+}
+
+static int si_upload_firmware(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int ret;
+
+	si_reset_smc(rdev);
+	si_stop_smc_clock(rdev);
+
+	ret = si_load_smc_ucode(rdev, si_pi->sram_end);
+
+	return ret;
+}
+
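+/* With n phase-select bits set in mask_low there are 2^n selectable phase
+ * configurations; a consistent pair of tables has one voltage entry per
+ * configuration and one limits entry per boundary between configurations.
+ */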
+static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
+					      const struct atom_voltage_table *table,
+					      const struct radeon_phase_shedding_limits_table *limits)
+{
+	u32 data, num_bits, num_levels;
+
+	if ((table == NULL) || (limits == NULL))
+		return false;
+
+	data = table->mask_low;
+
+	num_bits = hweight32(data);
+
+	if (num_bits == 0)
+		return false;
+
+	num_levels = (1 << num_bits);
+
+	if (table->count != num_levels)
+		return false;
+
+	if (limits->count != (num_levels - 1))
+		return false;
+
+	return true;
+}
+
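+/* Drop the first (count - max_voltage_steps) entries; the tables appear to
+ * be sorted ascending, so this discards the lowest voltage steps and keeps
+ * the highest ones.
+ */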
+void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+					      u32 max_voltage_steps,
+					      struct atom_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= max_voltage_steps)
+		return;
+
+	diff = voltage_table->count - max_voltage_steps;
+
+	for (i = 0; i < max_voltage_steps; i++)
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+	voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct radeon_device *rdev,
+				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+				     struct atom_voltage_table *voltage_table)
+{
+	u32 i;
+
+	if (voltage_dependency_table == NULL)
+		return -EINVAL;
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+
+	voltage_table->count = voltage_dependency_table->count;
+	for (i = 0; i < voltage_table->count; i++) {
+		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+static int si_construct_voltage_tables(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int ret;
+
+	if (pi->voltage_control) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(rdev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddc_voltage_table);
+	} else if (si_pi->voltage_control_svi2) {
+		ret = si_get_svi2_voltage_table(rdev,
+						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						&eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
+
+	if (eg_pi->vddci_control) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(rdev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddci_voltage_table);
+	}
+	if (si_pi->vddci_control_svi2) {
+		ret = si_get_svi2_voltage_table(rdev,
+						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						&eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->mvdd_control) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+		if (ret) {
+			pi->mvdd_control = false;
+			return ret;
+		}
+
+		if (si_pi->mvdd_voltage_table.count == 0) {
+			pi->mvdd_control = false;
+			return -EINVAL;
+		}
+
+		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(rdev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &si_pi->mvdd_voltage_table);
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+		if (ret)
+			si_pi->vddc_phase_shed_control = false;
+
+		if ((si_pi->vddc_phase_shed_table.count == 0) ||
+		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+			si_pi->vddc_phase_shed_control = false;
+	}
+
+	return 0;
+}
+
+static void si_populate_smc_voltage_table(struct radeon_device *rdev,
+					  const struct atom_voltage_table *voltage_table,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < voltage_table->count; i++)
+		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
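+/* SVI2 parts drive voltage over the serial VID interface, so only the
+ * rework gpio ids and platform type are handed to the SMC; gpio-controlled
+ * parts get the full SMIO masks and voltage tables instead.
+ */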
+static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u8 i;
+
+	if (si_pi->voltage_control_svi2) {
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+			si_pi->svc_gpio_id);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+			si_pi->svd_gpio_id);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+					   2);
+	} else {
+		if (eg_pi->vddc_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+					table->maxVDDCIndexInPPTable = i;
+					break;
+				}
+			}
+		}
+
+		if (eg_pi->vddci_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+		}
+
+		if (si_pi->mvdd_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+		}
+
+		if (si_pi->vddc_phase_shed_control) {
+			if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+							      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+				si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+
+				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
+					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+				si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
+			} else {
+				si_pi->vddc_phase_shed_control = false;
+			}
+		}
+	}
+
+	return 0;
+}
+
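+/* Pick the first entry at or above the requested voltage (entries are
+ * assumed sorted ascending) and report its index and value.
+ */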
+static int si_populate_voltage_value(struct radeon_device *rdev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value <= table->entries[i].value) {
+			voltage->index = (u8)i;
+			voltage->value = cpu_to_be16(table->entries[i].value);
+			break;
+		}
+	}
+
+	if (i >= table->count)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	if (pi->mvdd_control) {
+		if (mclk <= pi->mvdd_split_frequency)
+			voltage->index = 0;
+		else
+			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+	}
+	return 0;
+}
+
+static int si_get_std_voltage_value(struct radeon_device *rdev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage)
+{
+	u16 v_index;
+	bool voltage_found = false;
+
+	*std_voltage = be16_to_cpu(voltage->value);
+
+	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+			if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+				return -EINVAL;
+
+			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+				if (be16_to_cpu(voltage->value) ==
+				    (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+					voltage_found = true;
+					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+						*std_voltage =
+							rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+					else
+						*std_voltage =
+							rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+					break;
+				}
+			}
+
+			if (!voltage_found) {
+				for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+					if (be16_to_cpu(voltage->value) <=
+					    (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+						voltage_found = true;
+						if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+							*std_voltage =
+								rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+						else
+							*std_voltage =
+								rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+						break;
+					}
+				}
+			}
+		} else {
+			if ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+				*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+		}
+	}
+
+	return 0;
+}
+
+static int si_populate_std_voltage_value(struct radeon_device *rdev,
+					 u16 value, u8 index,
+					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	voltage->index = index;
+	voltage->value = cpu_to_be16(value);
+
+	return 0;
+}
+
+static int si_populate_phase_shedding_value(struct radeon_device *rdev,
+					    const struct radeon_phase_shedding_limits_table *limits,
+					    u16 voltage, u32 sclk, u32 mclk,
+					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < limits->count; i++) {
+		if ((voltage <= limits->entries[i].voltage) &&
+		    (sclk <= limits->entries[i].sclk) &&
+		    (mclk <= limits->entries[i].mclk))
+			break;
+	}
+
+	smc_voltage->phase_settings = (u8)i;
+
+	return 0;
+}
+
+static int si_init_arb_table_index(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start, &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+	return si_write_smc_sram_dword(rdev, si_pi->arb_table_start, tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct radeon_device *rdev)
+{
+	return (si_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+	int ret;
+
+	ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start,
+				     &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp = (tmp >> 24) & 0xff;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
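+/* Derive the memory arbiter refresh rate from the DRAM geometry: row count
+ * from MC_ARB_RAMCFG.NOOFROWS, refresh interval factor from MC_SEQ_MISC0,
+ * scaled against the engine clock in 64-cycle units.
+ */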
+static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
+					    u32 engine_clock)
+{
+	u32 dram_rows;
+	u32 dram_refresh_rate;
+	u32 mc_arb_rfsh_rate;
+	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+	if (tmp >= 4)
+		dram_rows = 16384;
+	else
+		dram_rows = 1 << (tmp + 10);
+
+	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+	return mc_arb_rfsh_rate;
+}
+
+static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
+						struct rv7xx_pl *pl,
+						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 burst_time;
+
+	arb_regs->mc_arb_rfsh_rate =
+		(u8)si_calculate_memory_refresh_rate(rdev, pl->sclk);
+
+	radeon_atom_set_engine_dram_timings(rdev,
+					    pl->sclk,
+					    pl->mclk);
+
+	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+	arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+	return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
+						  struct radeon_ps *radeon_state,
+						  unsigned int first_arb_set)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int i, ret = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ret = si_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
+		if (ret)
+			break;
+		ret = si_copy_bytes_to_smc(rdev,
+					   si_pi->arb_table_start +
+					   offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+					   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+					   (u8 *)&arb_regs,
+					   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+					   si_pi->sram_end);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int si_program_memory_timing_parameters(struct radeon_device *rdev,
+					       struct radeon_ps *radeon_new_state)
+{
+	return si_do_program_memory_timing_parameters(rdev, radeon_new_state,
+						      SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct radeon_device *rdev,
+					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
+	if (pi->mvdd_control)
+		return si_populate_voltage_value(rdev, &si_pi->mvdd_voltage_table,
+						 si_pi->mvdd_bootup_value, voltage);
+
+	return 0;
+}
+
+static int si_populate_smc_initial_state(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_initial_state,
+					 SISLANDS_SMC_STATETABLE *table)
+{
+	struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 reg;
+	int ret;
+
+	table->initialState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(si_pi->clock_registers.dll_cntl);
+	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+	table->initialState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->initialState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+	table->initialState.levels[0].arbRefreshState =
+		SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+	table->initialState.levels[0].ACIndex = 0;
+
+	ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+					initial_state->performance_levels[0].vddc,
+					&table->initialState.levels[0].vddc);
+
+	if (!ret) {
+		u16 std_vddc;
+
+		ret = si_get_std_voltage_value(rdev,
+					       &table->initialState.levels[0].vddc,
+					       &std_vddc);
+		if (!ret)
+			si_populate_std_voltage_value(rdev, std_vddc,
+						      table->initialState.levels[0].vddc.index,
+						      &table->initialState.levels[0].std_vddc);
+	}
+
+	if (eg_pi->vddci_control)
+		si_populate_voltage_value(rdev,
+					  &eg_pi->vddci_voltage_table,
+					  initial_state->performance_levels[0].vddci,
+					  &table->initialState.levels[0].vddci);
+
+	if (si_pi->vddc_phase_shed_control)
+		si_populate_phase_shedding_value(rdev,
+						 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						 initial_state->performance_levels[0].vddc,
+						 initial_state->performance_levels[0].sclk,
+						 initial_state->performance_levels[0].mclk,
+						 &table->initialState.levels[0].vddc);
+
+	si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+
+	reg = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(reg);
+
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+
+	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+	if (pi->mem_gddr5) {
+		table->initialState.levels[0].strobeMode =
+			si_get_strobe_mode_settings(rdev,
+						    initial_state->performance_levels[0].mclk);
+
+		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+		else
+			table->initialState.levels[0].mcFlags = 0;
+	}
+
+	table->initialState.levelCount = 1;
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	table->initialState.levels[0].dpm2.MaxPS = 0;
+	table->initialState.levels[0].dpm2.NearTDPDec = 0;
+	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int si_populate_smc_acpi_state(struct radeon_device *rdev,
+				      SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32 reg;
+	int ret;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(rdev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+			if (!ret)
+				si_populate_std_voltage_value(rdev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+		if (si_pi->vddc_phase_shed_control) {
+			si_populate_phase_shedding_value(rdev,
+							 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->acpi_vddc,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+		}
+	} else {
+		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
+						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(rdev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+
+			if (!ret)
+				si_populate_std_voltage_value(rdev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
+										    si_pi->sys_pcie_mask,
+										    si_pi->boot_pcie_gen,
+										    RADEON_PCIE_GEN1);
+
+		if (si_pi->vddc_phase_shed_control)
+			si_populate_phase_shedding_value(rdev,
+							 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->min_vddc_in_table,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+	}
+
+	if (pi->acpi_vddc) {
+		if (eg_pi->acpi_vddci)
+			si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
+						  eg_pi->acpi_vddci,
+						  &table->ACPIState.levels[0].vddci);
+	}
+
+	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(dll_cntl);
+	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(mpll_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(mpll_func_cntl_1);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(mpll_func_cntl_2);
+	table->ACPIState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(spll_func_cntl_3);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(spll_func_cntl_4);
+
+	table->ACPIState.levels[0].mclk.mclk_value = 0;
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+
+	if (eg_pi->dynamic_ac_timing)
+		table->ACPIState.levels[0].ACIndex = 0;
+
+	table->ACPIState.levels[0].dpm2.MaxPS = 0;
+	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int si_populate_ulv_state(struct radeon_device *rdev,
+				 SISLANDS_SMC_SWSTATE *state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	u32 sclk_in_sr = 1350; /* ??? */
+	int ret;
+
+	ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
+					    &state->levels[0]);
+	if (!ret) {
+		if (eg_pi->sclk_deep_sleep) {
+			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+			else
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+		}
+		if (ulv->one_pcie_lane_in_ulv)
+			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+		state->levels[0].ACIndex = 1;
+		state->levels[0].std_vddc = state->levels[0].vddc;
+		state->levelCount = 1;
+
+		state->flags |= PPSMC_SWSTATE_FLAG_DC;
+	}
+
+	return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int ret;
+
+	ret = si_populate_memory_timing_parameters(rdev, &ulv->pl,
+						   &arb_regs);
+	if (ret)
+		return ret;
+
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+				   ulv->volt_change_delay);
+
+	ret = si_copy_bytes_to_smc(rdev,
+				   si_pi->arb_table_start +
+				   offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+				   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+				   (u8 *)&arb_regs,
+				   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+				   si_pi->sram_end);
+
+	return ret;
+}
+
+static void si_get_mvdd_configuration(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+
+	pi->mvdd_split_frequency = 30000;
+}
+
+static int si_init_smc_table(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
+	int ret;
+	u32 lane_width;
+	u32 vr_hot_gpio;
+
+	si_populate_smc_voltage_tables(rdev, table);
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+		if ((rdev->pdev->device != 0x6818) && (rdev->pdev->device != 0x6819))
+			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+	}
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (pi->mem_gddr5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
+		vr_hot_gpio = rdev->pm.dpm.backbias_response_time;
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+					   vr_hot_gpio);
+	}
+
+	ret = si_populate_smc_initial_state(rdev, radeon_boot_state, table);
+	if (ret)
+		return ret;
+
+	ret = si_populate_smc_acpi_state(rdev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
+						     SISLANDS_INITIAL_STATE_ARB_INDEX);
+	if (ret)
+		return ret;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		ret = si_populate_ulv_state(rdev, &table->ULVState);
+		if (ret)
+			return ret;
+
+		ret = si_program_ulv_memory_timing_parameters(rdev);
+		if (ret)
+			return ret;
+
+		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+		lane_width = radeon_get_pcie_lanes(rdev);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	} else {
+		table->ULVState = table->initialState;
+	}
+
+	return si_copy_bytes_to_smc(rdev, si_pi->state_table_start,
+				    (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+				    si_pi->sram_end);
+}
+
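+/* Solve the SPLL relation sclk = refclk * fb / (refdiv * postdiv) for the
+ * feedback divider; the 16384 (2^14) factor puts fbdiv in the fixed-point
+ * format SPLL_FB_DIV takes, with SPLL_DITHEN dithering the fractional part.
+ */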
+static int si_calculate_sclk_params(struct radeon_device *rdev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
+	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = engine_clock;
+	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+	return 0;
+}
+
+static int si_populate_sclk_value(struct radeon_device *rdev,
+				  u32 engine_clock,
+				  SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+	int ret;
+
+	ret = si_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
+	if (!ret) {
+		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+	}
+
+	return ret;
+}
+
+static int si_populate_mclk_value(struct radeon_device *rdev,
+				  u32 engine_clock,
+				  u32 memory_clock,
+				  SISLANDS_SMC_MCLK_VALUE *mclk,
+				  bool strobe_mode,
+				  bool dll_state_on)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32  dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32  mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32  mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32  mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32  mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32  mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32  mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32  mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+	u32  mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+	struct atom_mpll_param mpll_param;
+	int ret;
+
+	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
+	if (ret)
+		return ret;
+
+	mpll_func_cntl &= ~BWCTRL_MASK;
+	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+	if (pi->mem_gddr5) {
+		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+			YCLK_POST_DIV(mpll_param.post_div);
+	}
+
+	if (pi->mclk_ss) {
+		struct radeon_atom_ss ss;
+		u32 freq_nom;
+		u32 tmp;
+		u32 reference_clock = rdev->clock.mpll.reference_freq;
+
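+		/* nominal data clock: GDDR5 transfers at 4x, other memory at
+		 * 2x the memory clock; spread spectrum is looked up against it
+		 */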
+		if (pi->mem_gddr5)
+			freq_nom = memory_clock * 4;
+		else
+			freq_nom = memory_clock * 2;
+
+		tmp = freq_nom / reference_clock;
+		tmp = tmp * tmp;
+		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+			u32 clks = reference_clock * 5 / ss.rate;
+			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clkv);
+
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clks);
+		}
+	}
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	mclk->mclk_value = cpu_to_be32(memory_clock);
+	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+static void si_populate_smc_sp(struct radeon_device *rdev,
+			       struct radeon_ps *radeon_state,
+			       SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_ps *ps = ni_get_ps(radeon_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	int i;
+
+	for (i = 0; i < ps->performance_level_count - 1; i++)
+		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+	smc_state->levels[ps->performance_level_count - 1].bSP =
+		cpu_to_be32(pi->psp);
+}
+
+static int si_convert_power_level_to_smc(struct radeon_device *rdev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	int ret;
+	bool dll_state_on;
+	u16 std_vddc;
+	bool gmc_pg = false;
+
+	if (eg_pi->pcie_performance_request &&
+	    (si_pi->force_pcie_gen != RADEON_PCIE_GEN_INVALID))
+		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+	else
+		level->gen2PCIE = (u8)pl->pcie_gen;
+
+	ret = si_populate_sclk_value(rdev, pl->sclk, &level->sclk);
+	if (ret)
+		return ret;
+
+	level->mcFlags = 0;
+
+	if (pi->mclk_stutter_mode_threshold &&
+	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+	    !eg_pi->uvd_enabled &&
+	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+	    (rdev->pm.dpm.new_active_crtc_count <= 2)) {
+		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
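+		/* gmc_pg stays false on SI, so SISLANDS_SMC_MC_PG_EN is never set here */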
+		if (gmc_pg)
+			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+	}
+
+	if (pi->mem_gddr5) {
+		if (pl->mclk > pi->mclk_edc_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+		level->strobeMode = si_get_strobe_mode_settings(rdev, pl->mclk);
+
+		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+			else
+				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+		} else {
+			dll_state_on = false;
+		}
+	} else {
+		level->strobeMode = si_get_strobe_mode_settings(rdev,
+								pl->mclk);
+
+		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+	}
+
+	ret = si_populate_mclk_value(rdev,
+				     pl->sclk,
+				     pl->mclk,
+				     &level->mclk,
+				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+	if (ret)
+		return ret;
+
+	ret = si_populate_voltage_value(rdev,
+					&eg_pi->vddc_voltage_table,
+					pl->vddc, &level->vddc);
+	if (ret)
+		return ret;
+
+	ret = si_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
+	if (ret)
+		return ret;
+
+	ret = si_populate_std_voltage_value(rdev, std_vddc,
+					    level->vddc.index, &level->std_vddc);
+	if (ret)
+		return ret;
+
+	if (eg_pi->vddci_control) {
+		ret = si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
+						pl->vddci, &level->vddci);
+		if (ret)
+			return ret;
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = si_populate_phase_shedding_value(rdev,
+						       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						       pl->vddc,
+						       pl->sclk,
+						       pl->mclk,
+						       &level->vddc);
+		if (ret)
+			return ret;
+	}
+
+	level->MaxPoweredUpCU = si_pi->max_cu;
+
+	ret = si_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
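+/* Fill in the per-level aT words: r600_calculate_at() yields the low/high
+ * switching thresholds between adjacent levels, which are scaled by
+ * bsp/pbsp and packed as CG_R/CG_L.
+ */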
+static int si_populate_smc_t(struct radeon_device *rdev,
+			     struct radeon_ps *radeon_state,
+			     SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	u32 a_t;
+	u32 t_l, t_h;
+	u32 high_bsp;
+	int i, ret;
+
+	if (state->performance_level_count >= 9)
+		return -EINVAL;
+
+	if (state->performance_level_count < 2) {
+		a_t = CG_R(0xffff) | CG_L(0);
+		smc_state->levels[0].aT = cpu_to_be32(a_t);
+		return 0;
+	}
+
+	smc_state->levels[0].aT = cpu_to_be32(0);
+
+	for (i = 0; i <= state->performance_level_count - 2; i++) {
+		ret = r600_calculate_at(
+			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+			100 * R600_AH_DFLT,
+			state->performance_levels[i + 1].sclk,
+			state->performance_levels[i].sclk,
+			&t_l,
+			&t_h);
+
+		if (ret) {
+			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+		}
+
+		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+		a_t |= CG_R(t_l * pi->bsp / 20000);
+		smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+		high_bsp = (i == state->performance_level_count - 2) ?
+			pi->pbsp : pi->bsp;
+		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+	}
+
+	return 0;
+}
+
+static int si_disable_ulv(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported)
+		return (si_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+
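+/* ULV is only entered when the state's mclk matches the ULV level, the ULV
+ * vddc still satisfies the dispclk voltage dependency, and UVD is idle
+ * (vclk/dclk both zero).
+ */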
+static bool si_is_state_ulv_compatible(struct radeon_device *rdev,
+				       struct radeon_ps *radeon_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(rdev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	const struct ni_ps *state = ni_get_ps(radeon_state);
+	int i;
+
+	if (state->performance_levels[0].mclk != ulv->pl.mclk)
+		return false;
+
+	/* XXX validate against display requirements! */
+
+	for (i = 0; i < rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+		if (rdev->clock.current_dispclk <=
+		    rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+			if (ulv->pl.vddc <
+			    rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+				return false;
+		}
+	}
+
+	if ((radeon_state->vclk != 0) || (radeon_state->dclk != 0))
+		return false;
+
+	return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
+						       struct radeon_ps *radeon_new_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(rdev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported) {
+		if (si_is_state_ulv_compatible(rdev, radeon_new_state))
+			return (si_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+	}
+	return 0;
+}
+
+static int si_convert_power_state_to_smc(struct radeon_device *rdev,
+					 struct radeon_ps *radeon_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	int i, ret;
+	u32 threshold;
+	u32 sclk_in_sr = 1350; /* ??? */
+
+	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+		return -EINVAL;
+
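+	/* the "* 100 / 100" cancels out, leaving the top level's sclk; it
+	 * reads like a percentage scale factor fixed at 100%
+	 */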
+	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+	if (radeon_state->vclk && radeon_state->dclk) {
+		eg_pi->uvd_enabled = true;
+		if (eg_pi->smu_uvd_hs)
+			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+	} else {
+		eg_pi->uvd_enabled = false;
+	}
+
+	if (state->dc_compatible)
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	smc_state->levelCount = 0;
+	for (i = 0; i < state->performance_level_count; i++) {
+		if (eg_pi->sclk_deep_sleep) {
+			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+				else
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+			}
+		}
+
+		ret = si_convert_power_level_to_smc(rdev, &state->performance_levels[i],
+						    &smc_state->levels[i]);
+		smc_state->levels[i].arbRefreshState =
+			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+		if (ret)
+			return ret;
+
+		if (ni_pi->enable_power_containment)
+			smc_state->levels[i].displayWatermark =
+				(state->performance_levels[i].sclk < threshold) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+		else
+			smc_state->levels[i].displayWatermark = (i < 2) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+		if (eg_pi->dynamic_ac_timing)
+			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+		else
+			smc_state->levels[i].ACIndex = 0;
+
+		smc_state->levelCount++;
+	}
+
+	si_write_smc_soft_register(rdev,
+				   SI_SMC_SOFT_REGISTER_watermark_threshold,
+				   threshold / 512);
+
+	si_populate_smc_sp(rdev, radeon_state, smc_state);
+
+	ret = si_populate_power_containment_values(rdev, radeon_state, smc_state);
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
+	if (ret)
+		ni_pi->enable_sq_ramping = false;
+
+	return si_populate_smc_t(rdev, radeon_state, smc_state);
+}
+
+static int si_upload_sw_state(struct radeon_device *rdev,
+			      struct radeon_ps *radeon_new_state)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct ni_ps *new_state = ni_get_ps(radeon_new_state);
+	int ret;
+	u32 address = si_pi->state_table_start +
+		offsetof(SISLANDS_SMC_STATETABLE, driverState);
+	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+		((new_state->performance_level_count - 1) *
+		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+	memset(smc_state, 0, state_size);
+
+	ret = si_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
+	if (ret)
+		return ret;
+
+	ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
+				   state_size, si_pi->sram_end);
+
+	return ret;
+}
+
+static int si_upload_ulv_state(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	int ret = 0;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		u32 address = si_pi->state_table_start +
+			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+		memset(smc_state, 0, state_size);
+
+		ret = si_populate_ulv_state(rdev, smc_state);
+		if (!ret)
+			ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
+						   state_size, si_pi->sram_end);
+	}
+
+	return ret;
+}
+
+static int si_upload_smc_data(struct radeon_device *rdev)
+{
+	struct radeon_crtc *radeon_crtc = NULL;
+	int i;
+
+	if (rdev->pm.dpm.new_active_crtc_count == 0)
+		return 0;
+
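+	/* use the first active crtc; its line time bounds the mclk change window */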
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->pm.dpm.new_active_crtcs & (1 << i)) {
+			radeon_crtc = rdev->mode_info.crtcs[i];
+			break;
+		}
+	}
+
+	if (radeon_crtc == NULL)
+		return 0;
+
+	if (radeon_crtc->line_time <= 0)
+		return 0;
+
+	/* si_write_smc_soft_register() returns 0 on success, not PPSMC_Result_OK */
+	if (si_write_smc_soft_register(rdev,
+				       SI_SMC_SOFT_REGISTER_crtc_index,
+				       radeon_crtc->crtc_id) != 0)
+		return 0;
+
+	if (si_write_smc_soft_register(rdev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+				       radeon_crtc->wm_high / radeon_crtc->line_time) != 0)
+		return 0;
+
+	if (si_write_smc_soft_register(rdev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+				       radeon_crtc->wm_low / radeon_crtc->line_time) != 0)
+		return 0;
+
+	return 0;
+}
+
+static int si_set_mc_special_registers(struct radeon_device *rdev,
+				       struct si_mc_reg_table *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u8 i, j, k;
+	u32 temp_reg;
+
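+	/* append LP shadow copies of the special MC registers after the vbios-provided entries */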
+	for (i = 0, j = table->last; i < table->last; i++) {
+		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+			return -EINVAL;
+		switch (table->mc_reg_address[i].s1 << 2) {
+		case MC_SEQ_MISC1:
+			temp_reg = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (!pi->mem_gddr5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			if (!pi->mem_gddr5) {
+				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
+				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
+				for (k = 0; k < table->num_entries; k++)
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				j++;
+				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
+			}
+			break;
+		case MC_SEQ_RESERVE_M:
+			temp_reg = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
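+	/* map each MC register onto the _LP shadow copy that is reprogrammed at runtime */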
+	switch (in_reg) {
+	case  MC_SEQ_RAS_TIMING >> 2:
+		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_CAS_TIMING >> 2:
+		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+		break;
+	case MC_SEQ_MISC_TIMING2 >> 2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_RD_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D0 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_D1 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+		break;
+	case MC_PMG_CMD_EMRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS1 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+		break;
+	case MC_SEQ_PMG_TIMING >> 2:
+		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+		break;
+	case MC_PMG_CMD_MRS2 >> 2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+		break;
+	case MC_SEQ_WR_CTL_2 >> 2:
+		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+	u8 i, j;
+
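+	/* flag only registers whose value differs between entries; constant ones are skipped on upload */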
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++)
+		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+				      struct si_mc_reg_table *si_table)
+{
+	u8 i, j;
+
+	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	si_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		si_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			si_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+	si_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+static int si_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct atom_mc_reg_table *table;
+	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(rdev);
+	int ret;
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = si_copy_vbios_mc_reg_table(table, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_s0_mc_reg_index(si_table);
+
+	ret = si_set_mc_special_registers(rdev, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_valid_flag(si_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
+static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
+					 SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				break;
+			mc_reg_table->address[i].s0 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+	mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+				    SMC_SIslands_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+						 struct rv7xx_pl *pl,
+						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 i = 0;
+
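+	/* pick the first entry whose mclk_max covers the requested mclk, clamping to the last entry */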
+	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+					   struct radeon_ps *radeon_state,
+					   SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	int i;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		si_convert_mc_reg_table_entry_to_smc(rdev,
+						     &state->performance_levels[i],
+						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+	}
+}
+
+static int si_populate_mc_reg_table(struct radeon_device *rdev,
+				    struct radeon_ps *radeon_boot_state)
+{
+	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+	si_populate_mc_reg_addresses(rdev, smc_mc_reg_table);
+
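+	/* fixed table slots: 0 = initial, 1 = ACPI, 2 = ULV, 3+ = driver states */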
+	si_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
+					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+				si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+
+	if (ulv->supported && ulv->pl.vddc != 0)
+		si_convert_mc_reg_table_entry_to_smc(rdev, &ulv->pl,
+						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+	else
+		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+					si_pi->mc_reg_table.last,
+					si_pi->mc_reg_table.valid_flag);
+
+	si_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, smc_mc_reg_table);
+
+	return si_copy_bytes_to_smc(rdev, si_pi->mc_reg_table_start,
+				    (u8 *)smc_mc_reg_table,
+				    sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct radeon_device *rdev,
+				  struct radeon_ps *radeon_new_state)
+{
+	struct ni_ps *new_state = ni_get_ps(radeon_new_state);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 address = si_pi->mc_reg_table_start +
+		offsetof(SMC_SIslands_MCRegisters,
+			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_convert_mc_reg_table_to_smc(rdev, radeon_new_state, smc_mc_reg_table);
+
+	return si_copy_bytes_to_smc(rdev, address,
+				    (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+				    sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+				    si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
+						      struct radeon_ps *radeon_state)
+{
+	struct ni_ps *state = ni_get_ps(radeon_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+	return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct radeon_device *rdev)
+{
+	u32 speed_cntl;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static void si_request_link_speed_change_before_state_change(struct radeon_device *rdev,
+							     struct radeon_ps *radeon_new_state,
+							     struct radeon_ps *radeon_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
+	enum radeon_pcie_gen current_link_speed;
+
+	if (si_pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
+		current_link_speed = si_get_maximum_link_speed(rdev, radeon_current_state);
+	else
+		current_link_speed = si_pi->force_pcie_gen;
+
+	si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+	si_pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+		case RADEON_PCIE_GEN3:
+			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
+			if (current_link_speed == RADEON_PCIE_GEN2)
+				break;
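+			/* fall through */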
+		case RADEON_PCIE_GEN2:
+			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
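+			/* fall through */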
+#endif
+		default:
+			si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			si_pi->pspp_notify_required = true;
+	}
+}
+
+static void si_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+							   struct radeon_ps *radeon_new_state,
+							   struct radeon_ps *radeon_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
+	u8 request;
+
+	if (si_pi->pspp_notify_required) {
+		if (target_link_speed == RADEON_PCIE_GEN3)
+			request = PCIE_PERF_REQ_PECI_GEN3;
+		else if (target_link_speed == RADEON_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN1;
+
+		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+		    (si_get_current_pcie_speed(rdev) > 0))
+			return;
+
+#if defined(CONFIG_ACPI)
+		radeon_acpi_pcie_performance_request(rdev, request, false);
+#endif
+	}
+}
+
+#if 0
+static int si_ds_request(struct radeon_device *rdev,
+			 bool ds_status_on, u32 count_write)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+
+	if (eg_pi->sclk_deep_sleep) {
+		if (ds_status_on)
+			return (si_send_msg_to_smc(rdev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		else
+			return (si_send_msg_to_smc(rdev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ? 0 : -EINVAL;
+	}
+	return 0;
+}
+#endif
+
+static void si_set_max_cu_value(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+
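+	/* cap the compute unit count on specific Verde SKUs; 0 means no cap */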
+	if (rdev->family == CHIP_VERDE) {
+		switch (rdev->pdev->device) {
+		case 0x6820:
+		case 0x6825:
+		case 0x6821:
+		case 0x6823:
+		case 0x6827:
+			si_pi->max_cu = 10;
+			break;
+		case 0x682D:
+		case 0x6824:
+		case 0x682F:
+		case 0x6826:
+			si_pi->max_cu = 8;
+			break;
+		case 0x6828:
+		case 0x6830:
+		case 0x6831:
+		case 0x6838:
+		case 0x6839:
+		case 0x683D:
+			si_pi->max_cu = 10;
+			break;
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+			si_pi->max_cu = 8;
+			break;
+		default:
+			si_pi->max_cu = 0;
+			break;
+		}
+	} else {
+		si_pi->max_cu = 0;
+	}
+}
+
+static int si_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
+							     struct radeon_clock_voltage_dependency_table *table)
+{
+	u32 i;
+	int j;
+	u16 leakage_voltage;
+
+	if (table) {
+		for (i = 0; i < table->count; i++) {
+			switch (si_get_leakage_voltage_from_leakage_index(rdev,
+									  table->entries[i].v,
+									  &leakage_voltage)) {
+			case 0:
+				table->entries[i].v = leakage_voltage;
+				break;
+			case -EAGAIN:
+				return -EINVAL;
+			case -EINVAL:
+			default:
+				break;
+			}
+		}
+
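+		/* keep the patched voltages non-decreasing with increasing clock */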
+		for (j = (table->count - 2); j >= 0; j--) {
+			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+				table->entries[j].v : table->entries[j + 1].v;
+		}
+	}
+	return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
+{
+	int ret = 0;
+
+	ret = si_patch_single_dependency_table_based_on_leakage(rdev,
+								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	ret = si_patch_single_dependency_table_based_on_leakage(rdev,
+								&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	ret = si_patch_single_dependency_table_based_on_leakage(rdev,
+								&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+	return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
+					  struct radeon_ps *radeon_new_state,
+					  struct radeon_ps *radeon_current_state)
+{
+	u32 lane_width;
+	u32 new_lane_width =
+		((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	u32 current_lane_width =
+		((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+
+	if (new_lane_width != current_lane_width) {
+		radeon_set_pcie_lanes(rdev, new_lane_width);
+		lane_width = radeon_get_pcie_lanes(rdev);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	}
+}
+
+static void si_set_vce_clock(struct radeon_device *rdev,
+			     struct radeon_ps *new_rps,
+			     struct radeon_ps *old_rps)
+{
+	if ((old_rps->evclk != new_rps->evclk) ||
+	    (old_rps->ecclk != new_rps->ecclk)) {
+		/* turn the clocks on when encoding, off otherwise */
+		if (new_rps->evclk || new_rps->ecclk)
+			vce_v1_0_enable_mgcg(rdev, false);
+		else
+			vce_v1_0_enable_mgcg(rdev, true);
+		radeon_set_vce_clocks(rdev, new_rps->evclk, new_rps->ecclk);
+	}
+}
+
+void si_dpm_setup_asic(struct radeon_device *rdev)
+{
+	int r;
+
+	r = si_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
+	rv770_get_memory_type(rdev);
+	si_read_clock_registers(rdev);
+	si_enable_acpi_power_management(rdev);
+}
+
+static int si_thermal_enable_alert(struct radeon_device *rdev,
+				   bool enable)
+{
+	u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+	if (enable) {
+		PPSMC_Result result;
+
+		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+		WREG32(CG_THERMAL_INT, thermal_int);
+		rdev->irq.dpm_thermal = false;
+		result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	} else {
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+		WREG32(CG_THERMAL_INT, thermal_int);
+		rdev->irq.dpm_thermal = true;
+	}
+
+	return 0;
+}
+
+static int si_thermal_set_temperature_range(struct radeon_device *rdev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
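+	/* the interrupt registers take whole degrees C; dpm limits are stored in millidegrees */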
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+
+	if (si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+		si_pi->fan_ctrl_default_mode = tmp;
+		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+		si_pi->t_min = tmp;
+		si_pi->fan_ctrl_is_in_default_mode = false;
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+	tmp |= TMIN(0);
+	WREG32(CG_FDO_CTRL2, tmp);
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+	tmp |= FDO_PWM_MODE(mode);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+	u32 duty100;
+	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	u16 fdo_min, slope1, slope2;
+	u32 reference_clock, tmp;
+	int ret;
+	u64 tmp64;
+
+	if (!si_pi->fan_table_start) {
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0) {
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
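+	/* vbios fan parameters are in 0.01% / 0.01 degC steps; rescale with rounding */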
+	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (u16)tmp64;
+
+	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.temp_min = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+	fan_table.temp_med = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+	fan_table.temp_max = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
+	fan_table.slope1 = cpu_to_be16(slope1);
+	fan_table.slope2 = cpu_to_be16(slope2);
+
+	fan_table.fdo_min = cpu_to_be16(fdo_min);
+
+	fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+	fan_table.hys_up = cpu_to_be16(1);
+
+	fan_table.hys_slope = cpu_to_be16(1);
+
+	fan_table.temp_resp_lim = cpu_to_be16(5);
+
+	reference_clock = radeon_get_xclk(rdev);
+
+	fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+						reference_clock) / 1600);
+
+	fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+	fan_table.temp_src = (uint8_t)tmp;
+
+	ret = si_copy_bytes_to_smc(rdev,
+				   si_pi->fan_table_start,
+				   (u8 *)(&fan_table),
+				   sizeof(fan_table),
+				   si_pi->sram_end);
+
+	if (ret) {
+		DRM_ERROR("Failed to load fan table to the SMC.\n");
+		rdev->pm.dpm.fan.ucode_fan_control = false;
+	}
+
+	return 0;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	PPSMC_Result ret;
+
+	ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = true;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	PPSMC_Result ret;
+
+	ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = false;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+				      u32 *speed)
+{
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (u32)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+				      u32 speed)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return -EINVAL;
+
+	if (speed > 100)
+		return -EINVAL;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
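+	/* map the 0-100% request onto the hardware's 0..duty100 duty range */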
+	tmp64 = (u64)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (u32)tmp64;
+
+	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+	tmp |= FDO_STATIC_DUTY(duty);
+	WREG32(CG_FDO_CTRL0, tmp);
+
+	return 0;
+}
+
+void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (rdev->pm.dpm.fan.ucode_fan_control)
+			si_fan_ctrl_stop_smc_fan_control(rdev);
+		si_fan_ctrl_set_static_mode(rdev, mode);
+	} else {
+		/* restart auto-manage */
+		if (rdev->pm.dpm.fan.ucode_fan_control)
+			si_thermal_start_smc_fan_control(rdev);
+		else
+			si_fan_ctrl_set_default_mode(rdev);
+	}
+}
+
+u32 si_fan_ctrl_get_mode(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return 0;
+
+	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+					 u32 *speed)
+{
+	u32 tach_period;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (rdev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+	if (tach_period == 0)
+		return -ENOENT;
+
+	*speed = 60 * xclk * 10000 / tach_period;
+
+	return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+					 u32 speed)
+{
+	u32 tach_period, tmp;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	if (rdev->pm.no_fan)
+		return -ENOENT;
+
+	if (rdev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	if ((speed < rdev->pm.fan_min_rpm) ||
+	    (speed > rdev->pm.fan_max_rpm))
+		return -EINVAL;
+
+	if (rdev->pm.dpm.fan.ucode_fan_control)
+		si_fan_ctrl_stop_smc_fan_control(rdev);
+
+	tach_period = 60 * xclk * 10000 / (8 * speed);
+	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+	tmp |= TARGET_PERIOD(tach_period);
+	WREG32(CG_TACH_CTRL, tmp);
+
+	si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+	return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	u32 tmp;
+
+	if (!si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+		WREG32(CG_FDO_CTRL2, tmp);
+
+		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+		tmp |= TMIN(si_pi->t_min);
+		WREG32(CG_FDO_CTRL2, tmp);
+		si_pi->fan_ctrl_is_in_default_mode = true;
+	}
+}
+
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+	if (rdev->pm.dpm.fan.ucode_fan_control) {
+		si_fan_ctrl_start_smc_fan_control(rdev);
+		si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+	}
+}
+
+static void si_thermal_initialize(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (rdev->pm.fan_pulses_per_revolution) {
+		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+		WREG32(CG_TACH_CTRL, tmp);
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+	tmp |= TACH_PWM_RESP_RATE(0x28);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+	int ret;
+
+	si_thermal_initialize(rdev);
+	ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(rdev, true);
+	if (ret)
+		return ret;
+	if (rdev->pm.dpm.fan.ucode_fan_control) {
+		ret = si_halt_smc(rdev);
+		if (ret)
+			return ret;
+		ret = si_thermal_setup_fan_table(rdev);
+		if (ret)
+			return ret;
+		ret = si_resume_smc(rdev);
+		if (ret)
+			return ret;
+		si_thermal_start_smc_fan_control(rdev);
+	}
+
+	return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+	if (!rdev->pm.no_fan) {
+		si_fan_ctrl_set_default_mode(rdev);
+		si_fan_ctrl_stop_smc_fan_control(rdev);
+	}
+}
+
+int si_dpm_enable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+	int ret;
+
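+	/* bring-up order: voltage control, MC/voltage tables, static setup, firmware, SMC tables, then start DPM */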
+	if (si_is_smc_running(rdev))
+		return -EINVAL;
+	if (pi->voltage_control || si_pi->voltage_control_svi2)
+		si_enable_voltage_control(rdev, true);
+	if (pi->mvdd_control)
+		si_get_mvdd_configuration(rdev);
+	if (pi->voltage_control || si_pi->voltage_control_svi2) {
+		ret = si_construct_voltage_tables(rdev);
+		if (ret) {
+			DRM_ERROR("si_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_initialize_mc_reg_table(rdev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		si_enable_spread_spectrum(rdev, true);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(rdev, true);
+	si_setup_bsp(rdev);
+	si_program_git(rdev);
+	si_program_tp(rdev);
+	si_program_tpp(rdev);
+	si_program_sstp(rdev);
+	si_enable_display_gap(rdev);
+	si_program_vc(rdev);
+	ret = si_upload_firmware(rdev);
+	if (ret) {
+		DRM_ERROR("si_upload_firmware failed\n");
+		return ret;
+	}
+	ret = si_process_firmware_header(rdev);
+	if (ret) {
+		DRM_ERROR("si_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = si_initial_switch_from_arb_f0_to_f1(rdev);
+	if (ret) {
+		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = si_init_smc_table(rdev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_table failed\n");
+		return ret;
+	}
+	ret = si_init_smc_spll_table(rdev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_spll_table failed\n");
+		return ret;
+	}
+	ret = si_init_arb_table_index(rdev);
+	if (ret) {
+		DRM_ERROR("si_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_populate_mc_reg_table(rdev, boot_ps);
+		if (ret) {
+			DRM_ERROR("si_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_initialize_smc_cac_tables(rdev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+		return ret;
+	}
+	ret = si_initialize_hardware_cac_manager(rdev);
+	if (ret) {
+		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+		return ret;
+	}
+	ret = si_initialize_smc_dte_tables(rdev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits(rdev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits_2(rdev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+		return ret;
+	}
+	si_program_response_times(rdev);
+	si_program_ds_registers(rdev);
+	si_dpm_start_smc(rdev);
+	ret = si_notify_smc_display_change(rdev, false);
+	if (ret) {
+		DRM_ERROR("si_notify_smc_display_change failed\n");
+		return ret;
+	}
+	si_enable_sclk_control(rdev, true);
+	si_start_dpm(rdev);
+
+	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	si_thermal_start_thermal_controller(rdev);
+
+	ni_update_current_ps(rdev, boot_ps);
+
+	return 0;
+}
+
+static int si_set_temperature_range(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = si_thermal_enable_alert(rdev, false);
+	if (ret)
+		return ret;
+	ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(rdev, true);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = si_set_temperature_range(rdev);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+void si_dpm_disable(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+	if (!si_is_smc_running(rdev))
+		return;
+	si_thermal_stop_thermal_controller(rdev);
+	si_disable_ulv(rdev);
+	si_clear_vc(rdev);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(rdev, false);
+	si_enable_power_containment(rdev, boot_ps, false);
+	si_enable_smc_cac(rdev, boot_ps, false);
+	si_enable_spread_spectrum(rdev, false);
+	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	si_stop_dpm(rdev);
+	si_reset_to_default(rdev);
+	si_dpm_stop_smc(rdev);
+	si_force_switch_to_arb_f0(rdev);
+
+	ni_update_current_ps(rdev, boot_ps);
+}
+
+int si_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	ni_update_requested_ps(rdev, new_ps);
+
+	si_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
+
+	return 0;
+}
+
+static int si_power_control_set_level(struct radeon_device *rdev)
+{
+	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
+	int ret;
+
+	ret = si_restrict_performance_levels_before_switch(rdev);
+	if (ret)
+		return ret;
+	ret = si_halt_smc(rdev);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits(rdev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits_2(rdev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_resume_smc(rdev);
+	if (ret)
+		return ret;
+	ret = si_set_sw_state(rdev);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+int si_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+	struct radeon_ps *old_ps = &eg_pi->current_rps;
+	int ret;
+
+	ret = si_disable_ulv(rdev);
+	if (ret) {
+		DRM_ERROR("si_disable_ulv failed\n");
+		return ret;
+	}
+	ret = si_restrict_performance_levels_before_switch(rdev);
+	if (ret) {
+		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	if (eg_pi->pcie_performance_request)
+		si_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	ret = si_enable_power_containment(rdev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(rdev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_halt_smc(rdev);
+	if (ret) {
+		DRM_ERROR("si_halt_smc failed\n");
+		return ret;
+	}
+	ret = si_upload_sw_state(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_upload_sw_state failed\n");
+		return ret;
+	}
+	ret = si_upload_smc_data(rdev);
+	if (ret) {
+		DRM_ERROR("si_upload_smc_data failed\n");
+		return ret;
+	}
+	ret = si_upload_ulv_state(rdev);
+	if (ret) {
+		DRM_ERROR("si_upload_ulv_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_upload_mc_reg_table(rdev, new_ps);
+		if (ret) {
+			DRM_ERROR("si_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_program_memory_timing_parameters(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	si_set_pcie_lane_width_in_smc(rdev, new_ps, old_ps);
+
+	ret = si_resume_smc(rdev);
+	if (ret) {
+		DRM_ERROR("si_resume_smc failed\n");
+		return ret;
+	}
+	ret = si_set_sw_state(rdev);
+	if (ret) {
+		DRM_ERROR("si_set_sw_state failed\n");
+		return ret;
+	}
+	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+	si_set_vce_clock(rdev, new_ps, old_ps);
+	if (eg_pi->pcie_performance_request)
+		si_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+	ret = si_set_power_state_conditionally_enable_ulv(rdev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(rdev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_enable_power_containment(rdev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = si_power_control_set_level(rdev);
+	if (ret) {
+		DRM_ERROR("si_power_control_set_level failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void si_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *new_ps = &eg_pi->requested_rps;
+
+	ni_update_current_ps(rdev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct radeon_device *rdev)
+{
+	si_restrict_performance_levels_before_switch(rdev);
+	si_disable_ulv(rdev);
+	si_set_boot_state(rdev);
+}
+#endif
+
+void si_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	si_program_display_gap(rdev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void si_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					  struct radeon_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
+		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		rdev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct radeon_device *rdev,
+				      struct radeon_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
+	struct ni_ps *ps = ni_get_ps(rps);
+	u16 leakage_voltage;
+	struct rv7xx_pl *pl = &ps->performance_levels[index];
+	int ret;
+
+	ps->performance_level_count = index + 1;
+
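+	/* clocks are encoded as a 16-bit low word plus an 8-bit high byte, in 10 kHz units */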
+	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
+						 si_pi->sys_pcie_mask,
+						 si_pi->boot_pcie_gen,
+						 clock_info->si.ucPCIEGen);
+
+	/* patch up vddc if necessary */
+	ret = si_get_leakage_voltage_from_leakage_index(rdev, pl->vddc,
+							&leakage_voltage);
+	if (ret == 0)
+		pl->vddc = leakage_voltage;
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_vddc = pl->vddc;
+		eg_pi->acpi_vddci = pl->vddci;
+		si_pi->acpi_pcie_gen = pl->pcie_gen;
+	}
+
+	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+	    index == 0) {
+		/* XXX disable for A0 tahiti */
+		si_pi->ulv.supported = false;
+		si_pi->ulv.pl = *pl;
+		si_pi->ulv.one_pcie_lane_in_ulv = false;
+		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+	}
+
+	if (pi->min_vddc_in_table > pl->vddc)
+		pi->min_vddc_in_table = pl->vddc;
+
+	if (pi->max_vddc_in_table < pl->vddc)
+		pi->max_vddc_in_table = pl->vddc;
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+		pl->mclk = rdev->clock.default_mclk;
+		pl->sclk = rdev->clock.default_sclk;
+		pl->vddc = vddc;
+		pl->vddci = vddci;
+		si_pi->mvdd_bootup_value = mvdd;
+	}
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+	}
+}
+
+static int si_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct ni_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
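+	/* each v2 state entry is two header bytes plus one clock-info index byte per DPM level */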
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info)
+			return -EINVAL;
+		ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(rdev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		rdev->pm.dpm.ps[i].ps_priv = ps;
+		si_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			si_parse_pplib_clock_info(rdev,
+						  &rdev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
+int si_dpm_init(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct ni_power_info *ni_pi;
+	struct si_power_info *si_pi;
+	struct atom_clock_dividers dividers;
+	enum pci_bus_speed speed_cap;
+	struct pci_dev *root = rdev->pdev->bus->self;
+	int ret;
+
+	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+	if (si_pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = si_pi;
+	ni_pi = &si_pi->ni;
+	eg_pi = &ni_pi->eg;
+	pi = &eg_pi->rv7xx;
+
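+	/* derive the supported link-speed mask from the root port's capability */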
+	speed_cap = pcie_get_speed_cap(root);
+	if (speed_cap == PCI_SPEED_UNKNOWN) {
+		si_pi->sys_pcie_mask = 0;
+	} else {
+		if (speed_cap == PCIE_SPEED_8_0GT)
+			si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+				RADEON_PCIE_SPEED_50 |
+				RADEON_PCIE_SPEED_80;
+		else if (speed_cap == PCIE_SPEED_5_0GT)
+			si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+				RADEON_PCIE_SPEED_50;
+		else
+			si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+	}
+	si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+	si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
+
+	si_set_max_cu_value(rdev);
+
+	rv770_get_max_vddc(rdev);
+	si_get_leakage_vddc(rdev);
+	si_patch_dependency_tables_based_on_leakage(rdev);
+
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret)
+		return ret;
+
+	ret = si_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kcalloc(4,
+			sizeof(struct radeon_clock_voltage_dependency_entry),
+			GFP_KERNEL);
+	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		r600_free_extended_power_table(rdev);
+		return -ENOMEM;
+	}
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	if (rdev->pm.dpm.voltage_response_time == 0)
+		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (rdev->pm.dpm.backbias_response_time == 0)
+		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = dividers.ref_div + 1;
+	else
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+
+	eg_pi->smu_uvd_hs = false;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	if (si_is_special_1gb_platform(rdev))
+		pi->mclk_stutter_mode_threshold = 0;
+	else
+		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+	pi->mclk_edc_enable_threshold = 40000;
+	eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+	pi->voltage_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!pi->voltage_control) {
+		si_pi->voltage_control_svi2 =
+			radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						    VOLTAGE_OBJ_SVID2);
+		if (si_pi->voltage_control_svi2)
+			radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+	}
+
+	pi->mvdd_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+
+	eg_pi->vddci_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!eg_pi->vddci_control)
+		si_pi->vddci_control_svi2 =
+			radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+						    VOLTAGE_OBJ_SVID2);
+
+	si_pi->vddc_phase_shed_control =
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_PHASE_LUT);
+
+	rv770_get_engine_memory_ss(rdev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = SISLANDS_VRC_DFLT;
+
+	pi->gfx_clock_gating = true;
+
+	eg_pi->sclk_deep_sleep = true;
+	si_pi->sclk_deep_sleep_above_low = false;
+
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	eg_pi->dynamic_ac_timing = true;
+
+	eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	si_pi->sram_end = SMC_RAM_END;
+
+	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	si_initialize_powertune_defaults(rdev);
+
+	/* make sure dc limits are valid */
+	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	si_pi->fan_ctrl_is_in_default_mode = true;
+
+	return 0;
+}
+
+void si_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	r600_free_extended_power_table(rdev);
+}
+
+void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		pl = &ps->performance_levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+	}
+}
+
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		return 0;
+	} else {
+		pl = &ps->performance_levels[current_index];
+		return pl->sclk;
+	}
+}
+
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct ni_ps *ps = ni_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		return 0;
+	} else {
+		pl = &ps->performance_levels[current_index];
+		return pl->mclk;
+	}
+}
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
new file mode 100644
index 0000000..1032a68
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "ni_dpm.h"
+#include "sislands_smc.h"
+
+enum si_cac_config_reg_type
+{
+	SISLANDS_CACCONFIG_MMR = 0,
+	SISLANDS_CACCONFIG_CGIND,
+	SISLANDS_CACCONFIG_MAX
+};
+
+struct si_cac_config_reg
+{
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+	u32 cac_window;
+	u32 l2_lta_window_size_default;
+	u8 lts_truncate_default;
+	u8 shift_n_default;
+	u8 operating_temp;
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 fixed_kt;
+	u32 lkge_lut_v0_percent;
+	u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+	bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+	u32 cac_leakage;
+	s32 leakage_minimum_temperature;
+	u32 wintime;
+	u32 l2_lta_window_size;
+	u8 lts_truncate;
+	u8 shift_n;
+	u8 dc_pwr_value;
+	bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+	u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 k;
+	u32 t0;
+	u32 max_t;
+	u8 window_size;
+	u8 temp_select;
+	u8 dte_mode;
+	u8 tdep_count;
+	u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 t_threshold;
+	bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 dll_cntl;
+	u32 mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl_1;
+	u32 mpll_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT               0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT                  1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
+
+struct si_leakage_voltage_entry
+{
+	u16 voltage;
+	u16 leakage_index;
+};
+
+#define SISLANDS_LEAKAGE_INDEX0     0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT  4
+
+struct si_leakage_voltage
+{
+	u16 count;
+	struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+
+struct si_ulv_param {
+	bool supported;
+	u32 cg_ulv_control;
+	u32 cg_ulv_parameter;
+	u32 volt_change_delay;
+	struct rv7xx_pl pl;
+	bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+	/* must be first! the shared NI dpm code casts the private
+	 * power-info pointer to its own type, so ni must sit at offset 0 */
+	struct ni_power_info ni;
+	struct si_clock_registers clock_registers;
+	struct si_mc_reg_table mc_reg_table;
+	struct atom_voltage_table mvdd_voltage_table;
+	struct atom_voltage_table vddc_phase_shed_table;
+	struct si_leakage_voltage leakage_voltage;
+	u16 mvdd_bootup_value;
+	struct si_ulv_param ulv;
+	u32 max_cu;
+	/* pcie gen */
+	enum radeon_pcie_gen force_pcie_gen;
+	enum radeon_pcie_gen boot_pcie_gen;
+	enum radeon_pcie_gen acpi_pcie_gen;
+	u32 sys_pcie_mask;
+	/* flags */
+	bool enable_dte;
+	bool enable_ppm;
+	bool vddc_phase_shed_control;
+	bool pspp_notify_required;
+	bool sclk_deep_sleep_above_low;
+	bool voltage_control_svi2;
+	bool vddci_control_svi2;
+	/* smc offsets */
+	u32 sram_end;
+	u32 state_table_start;
+	u32 soft_regs_start;
+	u32 mc_reg_table_start;
+	u32 arb_table_start;
+	u32 cac_table_start;
+	u32 dte_table_start;
+	u32 spll_table_start;
+	u32 papm_cfg_table_start;
+	u32 fan_table_start;
+	/* CAC configuration */
+	const struct si_cac_config_reg *cac_weights;
+	const struct si_cac_config_reg *lcac_config;
+	const struct si_cac_config_reg *cac_override;
+	const struct si_powertune_data *powertune_data;
+	struct si_dyn_powertune_data dyn_powertune_data;
+	/* DTE configuration */
+	struct si_dte_data dte_data;
+	/* scratch structs */
+	SMC_SIslands_MCRegisters smc_mc_reg_table;
+	SISLANDS_SMC_STATETABLE smc_statetable;
+	PP_SIslands_PAPMParameters papm_parm;
+	/* SVI2 */
+	u8 svd_gpio_id;
+	u8 svc_gpio_id;
+	/* fan control */
+	bool fan_ctrl_is_in_default_mode;
+	u32 t_min;
+	u32 fan_ctrl_default_mode;
+	bool fan_is_controlled_by_smc;
+};
+
+#define SISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define SISLANDS_ACPI_STATE_ARB_INDEX       1
+#define SISLANDS_ULV_STATE_ARB_INDEX        2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC          10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define SISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H                   99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M                   99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN         10
+
+#define SISLANDS_VRC_DFLT                               0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT             1687
+#define SISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT                      0x1f007550
+
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
new file mode 100644
index 0000000..501f9d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x65b0
+#define SI_DC_GPIO_HPD_A                         0x65b4
+#define SI_DC_GPIO_HPD_EN                        0x65b8
+#define SI_DC_GPIO_HPD_Y                         0x65bc
+
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
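+/* A sketch of how these compose (not taken from the driver): a linear
+ * aligned ARGB8888 scanout would program SI_GRPH_CONTROL with roughly
+ *	SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
+ *	SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
+ *	SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_LINEAR_ALIGNED)
+ */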
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
new file mode 100644
index 0000000..51155ab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "sid.h"
+#include "ppsmc.h"
+#include "radeon_ucode.h"
+#include "sislands_smc.h"
+
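+/* All SMC SRAM accessors in this file funnel through this helper: it
+ * rejects addresses that are not dword aligned or that would touch bytes
+ * past @limit, programs SMC_IND_INDEX_0, and clears the auto-increment
+ * bit.  Callers must hold rdev->smc_idx_lock around the index/data pair.
+ */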
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(SMC_IND_INDEX_0, smc_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+	return 0;
+}
+
+int si_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 data, original_data, addr, extra_shift;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = si_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW (read-modify-write) for the final, sub-dword bytes */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = si_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* SMC address space is BE */
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = si_set_smc_sram_address(rdev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
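+
+/* Worked example of the tail above: byte_count == 2 with src bytes
+ * { 0xAA, 0xBB } packs to 0xAABB, extra_shift is 16, so data becomes
+ * 0xAABB0000 and the mask ~((~0UL) << 16) == 0xFFFF keeps the two low
+ * bytes already in SRAM intact in the dword that gets written back.
+ */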
+
+void si_start_smc(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+	tmp &= ~RST_REG;
+
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void si_reset_smc(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+
+	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+	tmp |= RST_REG;
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
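+/* si_copy_bytes_to_smc() packs these bytes big-endian, so this plants the
+ * dword 0x0E004040 at SMC SRAM offset 0; per the function name, that is
+ * the instruction the SMC runs when it is released from reset.  The
+ * sizeof(data)+1 limit is just large enough to satisfy the bounds checks.
+ */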
+int si_program_jump_on_start(struct radeon_device *rdev)
+{
+	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+	return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void si_stop_smc_clock(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp |= CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void si_start_smc_clock(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp &= ~CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool si_is_smc_running(struct radeon_device *rdev)
+{
+	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+		return true;
+
+	return false;
+}
+
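+/* Posts @msg to SMC_MESSAGE_0 and polls SMC_RESP_0 for up to
+ * rdev->usec_timeout microseconds; a nonzero response is the SMC's reply.
+ * Typical use is a sketch like the following (message IDs live in
+ * ppsmc.h):
+ *
+ *	if (si_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+ *		DRM_ERROR("SMC did not accept the halt message\n");
+ */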
+PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+
+	if (!si_is_smc_running(rdev))
+		return PPSMC_Result_Failed;
+
+	WREG32(SMC_MESSAGE_0, msg);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(SMC_RESP_0);
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+	tmp = RREG32(SMC_RESP_0);
+
+	return (PPSMC_Result)tmp;
+}
+
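+/* Best effort only: this polls for the SMC clock-enable bit (CKEN) to
+ * drop, but returns PPSMC_Result_OK even if the timeout expires first.
+ */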
+PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	if (!si_is_smc_running(rdev))
+		return PPSMC_Result_OK;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & CKEN) == 0)
+			break;
+		udelay(1);
+	}
+
+	return PPSMC_Result_OK;
+}
+
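+/* Streams the whole SMC firmware image into SMC SRAM.  With
+ * AUTO_INCREMENT_IND_0 set, each SMC_IND_DATA_0 write advances the index
+ * by one dword, so SMC_IND_INDEX_0 only has to be programmed once.
+ */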
+int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
+{
+	unsigned long flags;
+	u32 ucode_start_address;
+	u32 ucode_size;
+	const u8 *src;
+	u32 data;
+
+	if (!rdev->smc_fw)
+		return -EINVAL;
+
+	if (rdev->new_fw) {
+		const struct smc_firmware_header_v1_0 *hdr =
+			(const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+		radeon_ucode_print_smc_hdr(&hdr->header);
+
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		src = (const u8 *)
+			(rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		switch (rdev->family) {
+		case CHIP_TAHITI:
+			ucode_start_address = TAHITI_SMC_UCODE_START;
+			ucode_size = TAHITI_SMC_UCODE_SIZE;
+			break;
+		case CHIP_PITCAIRN:
+			ucode_start_address = PITCAIRN_SMC_UCODE_START;
+			ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+			break;
+		case CHIP_VERDE:
+			ucode_start_address = VERDE_SMC_UCODE_START;
+			ucode_size = VERDE_SMC_UCODE_SIZE;
+			break;
+		case CHIP_OLAND:
+			ucode_start_address = OLAND_SMC_UCODE_START;
+			ucode_size = OLAND_SMC_UCODE_SIZE;
+			break;
+		case CHIP_HAINAN:
+			ucode_start_address = HAINAN_SMC_UCODE_START;
+			ucode_size = HAINAN_SMC_UCODE_SIZE;
+			break;
+		default:
+			DRM_ERROR("unknown asic in smc ucode loader\n");
+			BUG();
+		}
+		src = (const u8 *)rdev->smc_fw->data;
+	}
+
+	if (ucode_size & 3)
+		return -EINVAL;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, ucode_start_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+	while (ucode_size >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		ucode_size -= 4;
+	}
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return 0;
+}
+
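+/* Single-dword accessors used by the rest of the SI dpm code.  A typical
+ * read-modify-write (a sketch; the limit is normally the sram_end value
+ * cached in struct si_power_info):
+ *
+ *	u32 tmp;
+ *
+ *	if (!si_read_smc_sram_dword(rdev, offset, &tmp, limit)) {
+ *		tmp |= flag;
+ *		si_write_smc_sram_dword(rdev, offset, tmp, limit);
+ *	}
+ */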
+int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			   u32 *value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			    u32 value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(rdev, smc_address, limit);
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
new file mode 100644
index 0000000..65a911d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -0,0 +1,1956 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+#define SI_MAX_SH_GPRS           256
+#define SI_MAX_TEMP_GPRS         16
+#define SI_MAX_SH_THREADS        256
+#define SI_MAX_SH_STACK_ENTRIES  4096
+#define SI_MAX_FRC_EOV_CNT       16384
+#define SI_MAX_BACKENDS          8
+#define SI_MAX_BACKENDS_MASK     0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS             12
+#define SI_MAX_SIMDS_MASK        0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES             8
+#define SI_MAX_PIPES_MASK        0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM           0xFFFF
+#define SI_MAX_TCC               16
+#define SI_MAX_TCC_MASK          0xFFFF
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0                              0x200
+#define SMC_IND_DATA_0                               0x204
+
+#define SMC_IND_ACCESS_CNTL                          0x228
+#       define AUTO_INCREMENT_IND_0                  (1 << 0)
+#define SMC_MESSAGE_0                                0x22c
+#define SMC_RESP_0                                   0x230
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START                    0xc0030000
+#define SMC_CG_IND_END                      0xc0040000
+
+#define	CG_CGTT_LOCAL_0				0x400
+#define	CG_CGTT_LOCAL_1				0x401
+
+/* SMC IND registers */
+#define	SMC_SYSCON_RESET_CNTL				0x80000000
+#       define RST_REG                                  (1 << 0)
+#define	SMC_SYSCON_CLOCK_CNTL_0				0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define VGA_HDP_CONTROL  				0x328
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x4fc
+#define		DCCG_DISP1_SLOW_SELECT(x)		((x) << 0)
+#define		DCCG_DISP1_SLOW_SELECT_MASK		(7 << 0)
+#define		DCCG_DISP1_SLOW_SELECT_SHIFT		0
+#define		DCCG_DISP2_SLOW_SELECT(x)		((x) << 4)
+#define		DCCG_DISP2_SLOW_SELECT_MASK		(7 << 4)
+#define		DCCG_DISP2_SLOW_SELECT_SHIFT		4
+
+#define	CG_SPLL_FUNC_CNTL				0x600
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define		SPLL_PDIV_A_SHIFT			20
+#define	CG_SPLL_FUNC_CNTL_2				0x604
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SPLL_CTLREQ_CHG				(1 << 23)
+#define		SCLK_MUX_UPDATE				(1 << 26)
+#define	CG_SPLL_FUNC_CNTL_3				0x608
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_FB_DIV_SHIFT			0
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_FUNC_CNTL_4				0x60c
+
+#define	SPLL_STATUS					0x614
+#define		SPLL_CHG_STATUS				(1 << 1)
+#define	SPLL_CNTL_MODE					0x618
+#define		SPLL_SW_DIR_CONTROL			(1 << 0)
+#	define SPLL_REFCLK_SEL(x)			((x) << 26)
+#	define SPLL_REFCLK_SEL_MASK			(3 << 26)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x620
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define		CLK_S_SHIFT				4
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x624
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+#define		CLK_V_SHIFT				0
+
+#define	CG_SPLL_AUTOSCALE_CNTL				0x62c
+#       define AUTOSCALE_ON_SS_CLEAR                    (1 << 9)
+
+/* discrete uvd clocks */
+#define	CG_UPLL_FUNC_CNTL				0x634
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_VCO_MODE_MASK			0x00000600
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define	CG_UPLL_FUNC_CNTL_2				0x638
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define	CG_UPLL_FUNC_CNTL_3				0x63C
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define	CG_UPLL_FUNC_CNTL_4                             0x644
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define	CG_UPLL_FUNC_CNTL_5				0x648
+#	define RESET_ANTI_MUX_MASK			0x00000200
+#define	CG_UPLL_SPREAD_SPECTRUM				0x650
+#	define SSEN_MASK				0x00000001
+
+#define	MPLL_BYPASSCLK_SEL				0x65c
+#	define MPLL_CLKOUT_SEL(x)			((x) << 8)
+#	define MPLL_CLKOUT_SEL_MASK			0xFF00
+
+#define CG_CLKPIN_CNTL                                    0x660
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0x664
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define	THM_CLK_CNTL					0x66c
+#	define CMON_CLK_SEL(x)				((x) << 0)
+#	define CMON_CLK_SEL_MASK			0xFF
+#	define TMON_CLK_SEL(x)				((x) << 8)
+#	define TMON_CLK_SEL_MASK			0xFF00
+#define	MISC_CLK_CNTL					0x670
+#	define DEEP_SLEEP_CLK_SEL(x)			((x) << 0)
+#	define DEEP_SLEEP_CLK_SEL_MASK			0xFF
+#	define ZCLK_SEL(x)				((x) << 8)
+#	define ZCLK_SEL_MASK				0xFF00
+
+#define	CG_THERMAL_CTRL					0x700
+#define 	DPM_EVENT_SRC(x)			((x) << 0)
+#define 	DPM_EVENT_SRC_MASK			(7 << 0)
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+#define	CG_THERMAL_STATUS				0x704
+#define		FDO_PWM_DUTY(x)				((x) << 9)
+#define		FDO_PWM_DUTY_MASK			(0xff << 9)
+#define		FDO_PWM_DUTY_SHIFT			9
+#define	CG_THERMAL_INT					0x708
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
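+/* Programming both trip points while preserving the other bits would look
+ * roughly like this (a sketch; the dpm code works in degrees C here):
+ *
+ *	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(90) | DIG_THERM_INTL(60),
+ *		 ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK));
+ */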
+
+#define	CG_MULT_THERMAL_CTRL					0x710
+#define		TEMP_SEL(x)					((x) << 20)
+#define		TEMP_SEL_MASK					(0xff << 20)
+#define		TEMP_SEL_SHIFT					20
+#define	CG_MULT_THERMAL_STATUS					0x714
+#define		ASIC_MAX_TEMP(x)				((x) << 0)
+#define		ASIC_MAX_TEMP_MASK				0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT				0
+#define		CTF_TEMP(x)					((x) << 9)
+#define		CTF_TEMP_MASK					0x0003fe00
+#define		CTF_TEMP_SHIFT					9
+
+#define	CG_FDO_CTRL0					0x754
+#define		FDO_STATIC_DUTY(x)			((x) << 0)
+#define		FDO_STATIC_DUTY_MASK			0x000000FF
+#define		FDO_STATIC_DUTY_SHIFT			0
+#define	CG_FDO_CTRL1					0x758
+#define		FMAX_DUTY100(x)				((x) << 0)
+#define		FMAX_DUTY100_MASK			0x000000FF
+#define		FMAX_DUTY100_SHIFT			0
+#define	CG_FDO_CTRL2					0x75C
+#define		TMIN(x)					((x) << 0)
+#define		TMIN_MASK				0x000000FF
+#define		TMIN_SHIFT				0
+#define		FDO_PWM_MODE(x)				((x) << 11)
+#define		FDO_PWM_MODE_MASK			(7 << 11)
+#define		FDO_PWM_MODE_SHIFT			11
+#define		TACH_PWM_RESP_RATE(x)			((x) << 25)
+#define		TACH_PWM_RESP_RATE_MASK			(0x7f << 25)
+#define		TACH_PWM_RESP_RATE_SHIFT		25
+
+#define CG_TACH_CTRL                                    0x770
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x774
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define GENERAL_PWRMGT                                  0x780
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#define CG_TPC                                            0x784
+#define SCLK_PWRMGT_CNTL                                  0x788
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x798
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_FTV                                            0x7bc
+
+#define CG_FFCT_0                                         0x7c0
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x7fc
+#       define BSP(x)					((x) << 0)
+#       define BSP_MASK					(0xffff << 0)
+#       define BSU(x)					((x) << 16)
+#       define BSU_MASK					(0xf << 16)
+#define CG_AT                                           0x800
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+
+#define CG_GIT                                          0x804
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x80c
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x828
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define	CG_ULV_CONTROL					0x878
+#define	CG_ULV_PARAMETER				0x87c
+
+#define	SMC_SCRATCH0					0x884
+
+#define	CG_CAC_CTRL					0x8b8
+#	define CAC_WINDOW(x)				((x) << 0)
+#	define CAC_WINDOW_MASK				0x00ffffff
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define	SRBM_STATUS				        0xE50
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0xe80
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0xe84
+
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)			(((x) & 0xF) << 24)
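+/* The GART setup in si.c combines these along the lines of
+ *	ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ *	RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
+ * for system context 0 (one-level page table, range faults to the
+ * default page).
+ */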
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x1438
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x143c
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x1440
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x1444
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x1448
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x144c
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+#define		PROTECTIONS_MASK			(0xf << 0)
+#define		PROTECTIONS_SHIFT			0
+		/* bit 0: range
+		 * bit 1: pde0
+		 * bit 2: valid
+		 * bit 3: read
+		 * bit 4: write
+		 */
+#define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		MEMORY_CLIENT_ID_SHIFT			12
+#define		MEMORY_CLIENT_RW_MASK			(1 << 24)
+#define		MEMORY_CLIENT_RW_SHIFT			24
+#define		FAULT_VMID_MASK				(0xf << 25)
+#define		FAULT_VMID_SHIFT			25
+
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153c
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x1540
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x1544
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x1548
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x154c
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x1550
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x1554
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x1558
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155c
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x1560
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x1580
+
+#define VM_L2_CG           				0x15c0
+#define		MC_CG_ENABLE				(1 << 18)
+#define		MC_LS_ENABLE				(1 << 19)
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+
+#define MC_HUB_MISC_HUB_CG           			0x20b8
+#define MC_HUB_MISC_VM_CG           			0x20bc
+
+#define MC_HUB_MISC_SIP_CG           			0x20c0
+
+#define MC_XPB_CLK_GAT           			0x2478
+
+#define MC_CITF_MISC_RD_CG           			0x2648
+#define MC_CITF_MISC_WR_CG           			0x264c
+#define MC_CITF_MISC_VM_CG           			0x2650
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_ARB_DRAM_TIMING				0x2774
+#define	MC_ARB_DRAM_TIMING2				0x2778
+
+#define MC_ARB_BURST_TIME                               0x2808
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x28e8
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1 << 31)
+
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_PMG_AUTO_CMD           			0x28d0
+
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_RAS_TIMING                               0x28a0
+#define MC_SEQ_CAS_TIMING                               0x28a4
+#define MC_SEQ_MISC_TIMING                              0x28a8
+#define MC_SEQ_MISC_TIMING2                             0x28ac
+#define MC_SEQ_PMG_TIMING                               0x28b0
+#define MC_SEQ_RD_CTL_D0                                0x28b4
+#define MC_SEQ_RD_CTL_D1                                0x28b8
+#define MC_SEQ_WR_CTL_D0                                0x28bc
+#define MC_SEQ_WR_CTL_D1                                0x28c0
+
+#define MC_SEQ_MISC0           				0x2a00
+#define 	MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define 	MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define 	MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define 	MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define 	MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define 	MC_SEQ_MISC0_REV_ID_VALUE               1
+#define 	MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define 	MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define 	MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0x2a04
+#define MC_SEQ_RESERVE_M                                0x2a08
+#define MC_PMG_CMD_EMRS                                 0x2a0c
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define MC_SEQ_MISC5                                    0x2a54
+#define MC_SEQ_MISC6                                    0x2a58
+
+#define MC_SEQ_MISC7                                    0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP                            0x2a6c
+#define MC_SEQ_CAS_TIMING_LP                            0x2a70
+#define MC_SEQ_MISC_TIMING_LP                           0x2a74
+#define MC_SEQ_MISC_TIMING2_LP                          0x2a78
+#define MC_SEQ_WR_CTL_D0_LP                             0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP                             0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP                           0x2a88
+
+#define MC_PMG_CMD_MRS                                  0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP                             0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP                             0x2b20
+
+#define MC_PMG_CMD_MRS1                                 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0x2b48
+#define MC_SEQ_PMG_TIMING_LP                            0x2b4c
+
+#define MC_SEQ_WR_CTL_2                                 0x2b54
+#define MC_SEQ_WR_CTL_2_LP                              0x2b58
+#define MC_PMG_CMD_MRS2                                 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0x2b60
+
+#define	MCLK_PWRMGT_CNTL				0x2ba0
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define	DLL_CNTL					0x2ba4
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define	MPLL_CNTL_MODE					0x2bb0
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#define	MPLL_FUNC_CNTL					0x2bb4
+#define		BWCTRL(x)				((x) << 20)
+#define		BWCTRL_MASK				(0xff << 20)
+#define	MPLL_FUNC_CNTL_1				0x2bb8
+#define		VCO_MODE(x)				((x) << 0)
+#define		VCO_MODE_MASK				(3 << 0)
+#define		CLKFRAC(x)				((x) << 4)
+#define		CLKFRAC_MASK				(0xfff << 4)
+#define		CLKF(x)					((x) << 16)
+#define		CLKF_MASK				(0xfff << 16)
+#define	MPLL_FUNC_CNTL_2				0x2bbc
+#define	MPLL_AD_FUNC_CNTL				0x2bc0
+#define		YCLK_POST_DIV(x)			((x) << 0)
+#define		YCLK_POST_DIV_MASK			(7 << 0)
+#define	MPLL_DQ_FUNC_CNTL				0x2bc4
+#define		YCLK_SEL(x)				((x) << 4)
+#define		YCLK_SEL_MASK				(1 << 4)
+
+#define	MPLL_SS1					0x2bcc
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0x2bd0
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define 	CLOCK_GATING_DIS			(1 << 23)
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+#define HDP_MEM_POWER_LS				0x2F50
+#define 	HDP_LS_ENABLE				(1 << 0)
+
+#define ATC_MISC_CG           				0x3350
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                       0x5E00
+#       define AZ_ENDPOINT_REG_INDEX(x)                  (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                  (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                        0x5E04
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER          0x25
+#define		SPEAKER_ALLOCATION(x)			(((x) & 0x7f) << 0)
+#define		SPEAKER_ALLOCATION_MASK			(0x7f << 0)
+#define		SPEAKER_ALLOCATION_SHIFT		0
+#define		HDMI_CONNECTION				(1 << 16)
+#define		DP_CONNECTION				(1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0        0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1        0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2        0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3        0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4        0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5        0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6        0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7        0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8        0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9        0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10       0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11       0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12       0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13       0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                             (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define		PORT_CONNECTIVITY_MASK				(3 << 30)
+#define		PORT_CONNECTIVITY_SHIFT				30
+
+#define	DC_LB_MEMORY_SPLIT					0x6b0c
+#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
+
+#define	PRIORITY_A_CNT						0x6b18
+#define		PRIORITY_MARK_MASK				0x7fff
+#define		PRIORITY_OFF					(1 << 16)
+#define		PRIORITY_ALWAYS_ON				(1 << 20)
+#define	PRIORITY_B_CNT						0x6b1c
+
+#define	DPG_PIPE_ARBITRATION_CONTROL3				0x6cc8
+#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
+#define	DPG_PIPE_LATENCY_CONTROL				0x6ccc
+#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
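+/* The offset lists above are the per-CRTC instances of these registers;
+ * the irq-ack path adds the relevant CRTC register offset, roughly
+ * (crtc_offset being that per-CRTC displacement):
+ *	WREG32(VBLANK_STATUS + crtc_offset, VBLANK_ACK);
+ */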
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DAC_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
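+
+/*
+ * Usage sketch: a hot-plug interrupt is acknowledged with a read/modify/
+ * write of the per-pad control register (RREG32/WREG32 are the driver's
+ * MMIO accessors), e.g. for pad 1:
+ *
+ *    tmp = RREG32(DC_HPD1_INT_CONTROL);
+ *    tmp |= DC_HPDx_INT_ACK;
+ *    WREG32(DC_HPD1_INT_CONTROL, tmp);
+ */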
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x6cd4
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE                           0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL            (1 << 4)   /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE                           0x05b0
+#define DCCG_AUDIO_DTO0_MODULE                          0x05b4
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+
+#define DENTIST_DISPCLK_CNTL				0x0490
+#	define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
+#	define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
+#	define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
+
+#define AFMT_AUDIO_SRC_CONTROL                          0x713c
+#define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x8008
+#define		RLC_RQ_PENDING 					(1 << 0)
+#define		RLC_BUSY 					(1 << 8)
+#define		TC_BUSY 					(1 << 9)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_RLC					(1 << 2)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_BCI					(1 << 7)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
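+
+/*
+ * Usage sketch: GRBM_GFX_INDEX steers subsequent accesses to banked
+ * per-SE/per-SH registers; the broadcast bits restore the
+ * "write everywhere" default afterwards:
+ *
+ *    WREG32(GRBM_GFX_INDEX, SE_INDEX(se) | SH_INDEX(sh) |
+ *           INSTANCE_BROADCAST_WRITES);
+ *    ... access banked registers ...
+ *    WREG32(GRBM_GFX_INDEX, SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
+ *           INSTANCE_BROADCAST_WRITES);
+ */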
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+
+#define CP_ME_CNTL					0x86D8
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_COHER_CNTL2					0x85E8
+
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_VTX_VECT_EJECT_REG				0x88B0
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ESGS_RING_SIZE				0x88C8
+#define	VGT_GSVS_RING_SIZE				0x88CC
+
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define	VGT_PRIMITIVE_TYPE				0x8958
+#define	VGT_INDEX_TYPE					0x895C
+
+#define	VGT_NUM_INDICES					0x8970
+#define	VGT_NUM_INSTANCES				0x8974
+
+#define	VGT_TF_RING_SIZE				0x8988
+
+#define	VGT_HS_OFFCHIP_PARAM				0x89B0
+
+#define	VGT_TF_MEMORY_BASE				0x89B8
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x8BF0
+
+#define	SQ_CONFIG					0x8C00
+
+#define	SQC_CACHES					0x8C08
+
+#define SQ_POWER_THROTTLE                               0x8e58
+#define		MIN_POWER(x)				((x) << 0)
+#define		MIN_POWER_MASK				(0x3fff << 0)
+#define		MIN_POWER_SHIFT				0
+#define		MAX_POWER(x)				((x) << 16)
+#define		MAX_POWER_MASK				(0x3fff << 16)
+#define		MAX_POWER_SHIFT				16
+#define SQ_POWER_THROTTLE2                              0x8e5c
+#define		MAX_POWER_DELTA(x)			((x) << 0)
+#define		MAX_POWER_DELTA_MASK			(0x3fff << 0)
+#define		MAX_POWER_DELTA_SHIFT			0
+#define		STI_SIZE(x)				((x) << 16)
+#define		STI_SIZE_MASK				(0x3ff << 16)
+#define		STI_SIZE_SHIFT				16
+#define		LTI_RATIO(x)				((x) << 27)
+#define		LTI_RATIO_MASK				(0xf << 27)
+#define		LTI_RATIO_SHIFT				27
+
+#define	SX_DEBUG_1					0x9060
+
+#define	SPI_STATIC_THREAD_MGMT_1			0x90E0
+#define	SPI_STATIC_THREAD_MGMT_2			0x90E4
+#define	SPI_STATIC_THREAD_MGMT_3			0x90E8
+#define	SPI_PS_MAX_WAVE_ID				0x90EC
+
+#define	SPI_CONFIG_CNTL					0x9100
+
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x9150
+#define		OVERRIDE				(1 << 21)
+#define		LS_OVERRIDE				(1 << 22)
+
+#define	SPI_LB_CU_MASK					0x9354
+
+#define	TA_CNTL_AUX					0x9508
+#define	TA_CS_BC_BASE_ADDR				0x950C
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
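+
+/*
+ * Usage sketch: GB_ADDR_CONFIG fields decode with the MASK/SHIFT pairs
+ * above; the counts are log2 encoded, e.g.:
+ *
+ *    gb_addr_config = RREG32(GB_ADDR_CONFIG);
+ *    num_pipes = 1 << ((gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT);
+ *    row_size_kb = 1 << ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT);
+ */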
+
+#define	GB_TILE_MODE0					0x9910
+#       define MICRO_TILE_MODE(x)				((x) << 0)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define BANK_WIDTH(x)					((x) << 14)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 16)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 18)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 20)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
+
+#define	CB_PERFCOUNTER0_SELECT0				0x9a20
+#define	CB_PERFCOUNTER0_SELECT1				0x9a24
+#define	CB_PERFCOUNTER1_SELECT0				0x9a28
+#define	CB_PERFCOUNTER1_SELECT1				0x9a2c
+#define	CB_PERFCOUNTER2_SELECT0				0x9a30
+#define	CB_PERFCOUNTER2_SELECT1				0x9a34
+#define	CB_PERFCOUNTER3_SELECT0				0x9a38
+#define	CB_PERFCOUNTER3_SELECT1				0x9a3c
+
+#define	CB_CGTT_SCLK_CTRL				0x9a60
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0xac0c
+#define	TCP_CHAN_STEER_HI				0xac10
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+
+#define	CP_CE_UCODE_ADDR				0xC168
+#define	CP_CE_UCODE_DATA				0xC16C
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define CP_INT_CNTL_RING0                               0xC1A8
+#define CP_INT_CNTL_RING1                               0xC1AC
+#define CP_INT_CNTL_RING2                               0xC1B0
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0xC1B4
+#define CP_INT_STATUS_RING1                             0xC1B8
+#define CP_INT_STATUS_RING2                             0xC1BC
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define	CP_MEM_SLP_CNTL					0xC1E4
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define	CP_DEBUG					0xC1FC
+
+#define RLC_CNTL                                          0xC300
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0xC304
+#define RLC_RL_SIZE                                       0xC308
+#define RLC_LB_CNTL                                       0xC30C
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE                         0xC310
+#define RLC_LB_CNTR_MAX                                   0xC314
+#define RLC_LB_CNTR_INIT                                  0xC318
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0xC320
+
+#define RLC_UCODE_ADDR                                    0xC32C
+#define RLC_UCODE_DATA                                    0xC330
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC340
+#define RLC_MC_CNTL                                       0xC344
+#define RLC_UCODE_CNTL                                    0xC348
+#define RLC_STAT                                          0xC34C
+#       define RLC_BUSY_STATUS                            (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+#       define GFX_LS_STATUS                              (1 << 3)
+
+#define	RLC_PG_CNTL					0xC35C
+#	define GFX_PG_ENABLE				(1 << 0)
+#	define GFX_PG_SRC				(1 << 1)
+
+#define	RLC_CGTT_MGCG_OVERRIDE				0xC400
+#define	RLC_CGCG_CGLS_CTRL				0xC404
+#	define CGCG_EN					(1 << 0)
+#	define CGLS_EN					(1 << 1)
+
+#define	RLC_TTOP_D					0xC414
+#	define RLC_PUD(x)				((x) << 0)
+#	define RLC_PUD_MASK				(0xff << 0)
+#	define RLC_PDD(x)				((x) << 8)
+#	define RLC_PDD_MASK				(0xff << 8)
+#	define RLC_TTPD(x)				((x) << 16)
+#	define RLC_TTPD_MASK				(0xff << 16)
+#	define RLC_MSD(x)				((x) << 24)
+#	define RLC_MSD_MASK				(0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK                               0xC41C
+
+#define	RLC_PG_AO_CU_MASK				0xC42C
+#define	RLC_MAX_PG_CU					0xC430
+#	define MAX_PU_CU(x)				((x) << 0)
+#	define MAX_PU_CU_MASK				(0xff << 0)
+#define	RLC_AUTO_PG_CTRL				0xC434
+#	define AUTO_PG_EN				(1 << 0)
+#	define GRBM_REG_SGIT(x)				((x) << 3)
+#	define GRBM_REG_SGIT_MASK			(0xffff << 3)
+#	define PG_AFTER_GRBM_REG_ST(x)			((x) << 19)
+#	define PG_AFTER_GRBM_REG_ST_MASK		(0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0                       0xC454
+#define RLC_SERDES_WR_MASTER_MASK_1                       0xC458
+#define RLC_SERDES_WR_CTRL                                0xC45C
+
+#define RLC_SERDES_MASTER_BUSY_0                          0xC464
+#define RLC_SERDES_MASTER_BUSY_1                          0xC468
+
+#define RLC_GCPM_GENERAL_3                                0xC478
+
+#define	DB_RENDER_CONTROL				0x28000
+
+#define DB_DEPTH_INFO                                   0x2803c
+
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PB0_PIF_PWRDOWN_2                                 0x17
+#       define PLL_POWER_STATE_IN_TXS2_2(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_2(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_SHIFT             10
+#       define PLL_RAMP_UP_TIME_2(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_2_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_2_SHIFT                   24
+#define PB0_PIF_PWRDOWN_3                                 0x18
+#       define PLL_POWER_STATE_IN_TXS2_3(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_3(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_SHIFT             10
+#       define PLL_RAMP_UP_TIME_3(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_3_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_3_SHIFT                   24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+
+#define PB1_PIF_PWRDOWN_2                                 0x17
+#define PB1_PIF_PWRDOWN_3                                 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2                                        0x1c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+#define PCIE_LC_STATUS1                                   0x28 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x40 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_N_FTS_CNTL                                0xa3 /* PCIE_P */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
+
+#define PCIE_LC_CNTL2                                     0xb1
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0xb5 /* PCIE_P */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0xb6 /* PCIE_P */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
+#define UVD_RBC_RB_RPTR					0xF690
+#define UVD_RBC_RB_WPTR					0xF694
+#define UVD_STATUS					0xf6bc
+
+#define	UVD_CGC_CTRL					0xF4B0
+#	define DCM					(1 << 0)
+#	define CG_DT(x)					((x) << 2)
+#	define CG_DT_MASK				(0xf << 2)
+#	define CLK_OD(x)				((x) << 6)
+#	define CLK_OD_MASK				(0x1f << 6)
+
+ /* UVD CTX indirect */
+#define	UVD_CGC_MEM_CTRL				0xC0
+#define	UVD_CGC_CTRL2					0xC1
+#	define DYN_OR_EN				(1 << 0)
+#	define DYN_RR_EN				(1 << 1)
+#	define G_DIV_ID(x)				((x) << 2)
+#	define G_DIV_ID_MASK				(0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
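+
+/*
+ * Usage sketch: the count field of a type-3 header is the number of body
+ * dwords minus one, so writing a single config register takes a two-dword
+ * body (dword offset from the range start, then the value).  Illustrative,
+ * assuming the driver's radeon_ring_write() helper:
+ *
+ *    radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ *    radeon_ring_write(ring, (reg - PACKET3_SET_CONFIG_REG_START) >> 2);
+ *    radeon_ring_write(ring, value);
+ */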
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			GDS_PARTITION_BASE		2
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ALLOC_GDS				0x1B
+#define	PACKET3_WRITE_GDS_RAM				0x1C
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC					0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
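+/*
+ * Usage sketch (register write through the ME, as in the VM-flush path):
+ * DST_SEL(0) targets a register, addressed by dword offset:
+ *
+ *    radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ *    radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0));
+ *    radeon_ring_write(ring, reg >> 2);   address lo (dword offset)
+ *    radeon_ring_write(ring, 0);          address hi
+ *    radeon_ring_write(ring, value);
+ */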
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+		 * 1 - pfp
+		 */
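+/*
+ * Usage sketch: stall the engine until (value & mask) == ref; a register
+ * poll (MEM_SPACE 0) takes the dword offset as the address:
+ *
+ *    radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ *    radeon_ring_write(ring, WAIT_REG_MEM_FUNCTION(3));  equal
+ *    radeon_ring_write(ring, reg >> 2);                  poll address lo
+ *    radeon_ring_write(ring, 0);                         poll address hi
+ *    radeon_ring_write(ring, ref);                       reference value
+ *    radeon_ring_write(ring, mask);                      mask
+ *    radeon_ring_write(ring, 0x20);                      poll interval
+ */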
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
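+/*
+ * Usage sketch (hypothetical, not a copy of driver code): the six dwords
+ * described above filled in for a memory-to-memory copy, with CP_SYNC set
+ * so the CP stalls until the transfer finishes:
+ *
+ *    radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *    radeon_ring_write(ring, lower_32_bits(src));
+ *    radeon_ring_write(ring, PACKET3_CP_DMA_CP_SYNC |
+ *                            (upper_32_bits(src) & 0xff));
+ *    radeon_ring_write(ring, lower_32_bits(dst));
+ *    radeon_ring_write(ring, upper_32_bits(dst) & 0xff);
+ *    radeon_ring_write(ring, byte_count);   COMMAND | BYTE_COUNT
+ */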
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+		 */
+#define		INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
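+/*
+ * Usage sketch (modeled on the GFX fence emit): flush caches, write the
+ * fence sequence number at addr and raise an interrupt once the write is
+ * confirmed:
+ *
+ *    radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ *    radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ *                            EVENT_INDEX(5));
+ *    radeon_ring_write(ring, lower_32_bits(addr));
+ *    radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) |
+ *                            DATA_SEL(1) | INT_SEL(2));
+ *    radeon_ring_write(ring, seq);
+ *    radeon_ring_write(ring, 0);
+ */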
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_LOAD_CONFIG_REG				0x5F
+#define	PACKET3_LOAD_CONTEXT_REG			0x60
+#define	PACKET3_LOAD_SH_REG				0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000b000
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x0000b000
+#define		PACKET3_SET_SH_REG_END				0x0000c000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_ME_WRITE				0x7A
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_CE_WRITE				0x7F
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SET_CE_DE_COUNTERS			0x89
+#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
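+
+/*
+ * Usage sketch: the same register block exists once per engine, so the
+ * driver adds the instance offset when touching either copy, e.g. to
+ * disable both ring buffers:
+ *
+ *    for (i = 0; i < 2; i++) {
+ *        u32 off = i ? DMA1_REGISTER_OFFSET : DMA0_REGISTER_OFFSET;
+ *        WREG32(DMA_RB_CNTL + off,
+ *               RREG32(DMA_RB_CNTL + off) & ~DMA_RB_ENABLE);
+ *    }
+ */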
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG  				  0xd0b8
+
+#define	DMA_POWER_CNTL					0xd0bc
+#       define MEM_POWER_OVERRIDE                       (1 << 8)
+#define	DMA_CLK_CTRL					0xd0c0
+
+#define	DMA_PG						0xd0d4
+#	define PG_CNTL_ENABLE				(1 << 0)
+#define	DMA_PGFSM_CONFIG				0xd0d8
+#define	DMA_PGFSM_WRITE					0xd0dc
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_POLL_REG_MEM				  0xe
+#define	DMA_PACKET_NOP					  0xf
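+
+/*
+ * Usage sketch (mirrors the async DMA fence emit): a FENCE packet is the
+ * header followed by the write address pair and the value to store:
+ *
+ *    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ *    radeon_ring_write(ring, lower_32_bits(addr));
+ *    radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ *    radeon_ring_write(ring, seq);
+ */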
+
+#define VCE_STATUS					0x20004
+#define VCE_VCPU_CNTL					0x20014
+#define		VCE_CLK_EN				(1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0				0x20024
+#define VCE_VCPU_CACHE_SIZE0				0x20028
+#define VCE_VCPU_CACHE_OFFSET1				0x2002c
+#define VCE_VCPU_CACHE_SIZE1				0x20030
+#define VCE_VCPU_CACHE_OFFSET2				0x20034
+#define VCE_VCPU_CACHE_SIZE2				0x20038
+#define VCE_VCPU_SCRATCH7				0x200dc
+#define VCE_SOFT_RESET					0x20120
+#define 	VCE_ECPU_SOFT_RESET			(1 << 0)
+#define 	VCE_FME_SOFT_RESET			(1 << 2)
+#define VCE_RB_BASE_LO2					0x2016c
+#define VCE_RB_BASE_HI2					0x20170
+#define VCE_RB_SIZE2					0x20174
+#define VCE_RB_RPTR2					0x20178
+#define VCE_RB_WPTR2					0x2017c
+#define VCE_RB_BASE_LO					0x20180
+#define VCE_RB_BASE_HI					0x20184
+#define VCE_RB_SIZE					0x20188
+#define VCE_RB_RPTR					0x2018c
+#define VCE_RB_WPTR					0x20190
+#define VCE_CLOCK_GATING_A				0x202f8
+#	define CGC_DYN_CLOCK_MODE			(1 << 16)
+#define VCE_CLOCK_GATING_B				0x202fc
+#define VCE_UENC_CLOCK_GATING				0x205bc
+#define VCE_UENC_REG_CLOCK_GATING			0x205c0
+#define VCE_FW_REG_STATUS				0x20e10
+#	define VCE_FW_REG_STATUS_BUSY			(1 << 0)
+#	define VCE_FW_REG_STATUS_PASS			(1 << 3)
+#	define VCE_FW_REG_STATUS_DONE			(1 << 11)
+#define VCE_LMI_FW_START_KEYSEL				0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL			0x20e20
+#define VCE_LMI_CTRL2					0x20e74
+#define VCE_LMI_CTRL					0x20e98
+#define VCE_LMI_VM_CTRL					0x20ea0
+#define VCE_LMI_SWAP_CNTL				0x20eb4
+#define VCE_LMI_SWAP_CNTL1				0x20eb8
+#define VCE_LMI_CACHE_CTRL				0x20ef4
+
+#define VCE_CMD_NO_OP					0x00000000
+#define VCE_CMD_END					0x00000001
+#define VCE_CMD_IB					0x00000002
+#define VCE_CMD_FENCE					0x00000003
+#define VCE_CMD_TRAP					0x00000004
+#define VCE_CMD_IB_AUTO					0x00000005
+#define VCE_CMD_SEMAPHORE				0x00000006
+
+/* discrete vce clocks */
+#define	CG_VCEPLL_FUNC_CNTL				0xc0030600
+#	define VCEPLL_RESET_MASK			0x00000001
+#	define VCEPLL_SLEEP_MASK			0x00000002
+#	define VCEPLL_BYPASS_EN_MASK			0x00000004
+#	define VCEPLL_CTLREQ_MASK			0x00000008
+#	define VCEPLL_VCO_MODE_MASK			0x00000600
+#	define VCEPLL_REF_DIV_MASK			0x003F0000
+#	define VCEPLL_CTLACK_MASK			0x40000000
+#	define VCEPLL_CTLACK2_MASK			0x80000000
+#define	CG_VCEPLL_FUNC_CNTL_2				0xc0030601
+#	define VCEPLL_PDIV_A(x)				((x) << 0)
+#	define VCEPLL_PDIV_A_MASK			0x0000007F
+#	define VCEPLL_PDIV_B(x)				((x) << 8)
+#	define VCEPLL_PDIV_B_MASK			0x00007F00
+#	define EVCLK_SRC_SEL(x)				((x) << 20)
+#	define EVCLK_SRC_SEL_MASK			0x01F00000
+#	define ECCLK_SRC_SEL(x)				((x) << 25)
+#	define ECCLK_SRC_SEL_MASK			0x3E000000
+#define	CG_VCEPLL_FUNC_CNTL_3				0xc0030602
+#	define VCEPLL_FB_DIV(x)				((x) << 0)
+#	define VCEPLL_FB_DIV_MASK			0x01FFFFFF
+#define	CG_VCEPLL_FUNC_CNTL_4				0xc0030603
+#define	CG_VCEPLL_FUNC_CNTL_5				0xc0030604
+#define	CG_VCEPLL_SPREAD_SPECTRUM			0xc0030606
+#	define VCEPLL_SSEN_MASK				0x00000001
+
+#endif
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
new file mode 100644
index 0000000..966e3a5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+    uint8_t MaxPS;
+    uint8_t TgtAct;
+    uint8_t MaxPS_StepInc;
+    uint8_t MaxPS_StepDec;
+    uint8_t PSSamplingTime;
+    uint8_t NearTDPDec;
+    uint8_t AboveSafeInc;
+    uint8_t BelowSafeInc;
+    uint8_t PSDeltaLimit;
+    uint8_t PSDeltaWin;
+    uint16_t PwrEfficiencyRatio;
+    uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+    uint32_t    dpm2Flags;
+    uint8_t     CurrPSkip;
+    uint8_t     CurrPSkipPowerShift;
+    uint8_t     CurrPSkipTDP;
+    uint8_t     CurrPSkipOCP;
+    uint8_t     MaxSPLLIndex;
+    uint8_t     MinSPLLIndex;
+    uint8_t     CurrSPLLIndex;
+    uint8_t     InfSweepMode;
+    uint8_t     InfSweepDir;
+    uint8_t     TDPexceeded;
+    uint8_t     reserved;
+    uint8_t     SwitchDownThreshold;
+    uint32_t    SwitchDownCounter;
+    uint32_t    SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+    uint32_t    MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+    uint32_t    EstimatedDGPU_T;
+    uint32_t    EstimatedDGPU_P;
+    uint32_t    EstimatedAPU_T;
+    uint32_t    EstimatedAPU_P;
+    uint8_t     dGPU_T_Limit_Exceeded;
+    uint8_t     reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+    uint32_t    NearTDPLimitTherm;
+    uint32_t    NearTDPLimitPAPM;
+    uint32_t    PlatformPowerLimit;
+    uint32_t    dGPU_T_Limit;
+    uint32_t    dGPU_T_Warning;
+    uint32_t    dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t    vCG_SPLL_FUNC_CNTL;
+    uint32_t    vCG_SPLL_FUNC_CNTL_2;
+    uint32_t    vCG_SPLL_FUNC_CNTL_3;
+    uint32_t    vCG_SPLL_FUNC_CNTL_4;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t    sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t    vMPLL_FUNC_CNTL;
+    uint32_t    vMPLL_FUNC_CNTL_1;
+    uint32_t    vMPLL_FUNC_CNTL_2;
+    uint32_t    vMPLL_AD_FUNC_CNTL;
+    uint32_t    vMPLL_DQ_FUNC_CNTL;
+    uint32_t    vMCLK_PWRMGT_CNTL;
+    uint32_t    vDLL_CNTL;
+    uint32_t    vMPLL_SS;
+    uint32_t    vMPLL_SS2;
+    uint32_t    mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t    value;
+    uint8_t     index;
+    uint8_t     phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     UVDWatermark;
+    uint8_t                     VCEWatermark;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint8_t                     padding;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    SISLANDS_SMC_SCLK_VALUE     sclk;
+    SISLANDS_SMC_MCLK_VALUE     mclk;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    SISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint8_t                     hysteresisUp;
+    uint8_t                     hysteresisDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    MaxPoweredUpCU;
+    SISLANDS_SMC_VOLTAGE_VALUE  high_temp_vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  low_temp_vddc;
+    uint32_t                    reserved[2];
+    PP_SIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO    0x0F
+#define SISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define SISLANDS_SMC_MC_STUTTER_EN   0x08
+#define SISLANDS_SMC_MC_PG_EN        0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
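+
+/*
+ * levels[] is a variable-length trailer: a software state carrying N
+ * performance levels occupies (illustrative size computation)
+ *
+ *    sizeof(SISLANDS_SMC_SWSTATE) +
+ *        (N - 1) * sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL)
+ *
+ * bytes when laid out for upload to SMC SRAM.
+ */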
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
+#define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
+    PP_SIslands_DPM2Parameters          dpm2Params;
+    SISLANDS_SMC_SWSTATE                initialState;
+    SISLANDS_SMC_SWSTATE                ACPIState;
+    SISLANDS_SMC_SWSTATE                ULVState;
+    SISLANDS_SMC_SWSTATE                driverState;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout         0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg               0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi               0x28
+#define SI_SMC_SOFT_REGISTER_seq_index                0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time            0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim          0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold      0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay     0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay    0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay           0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us             0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index               0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width  0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen   0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio              0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type     0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
+
+struct PP_SIslands_FanTable
+{
+	uint8_t  fdo_mode;
+	uint8_t  padding;
+	int16_t  temp_min;
+	int16_t  temp_med;
+	int16_t  temp_max;
+	int16_t  slope1;
+	int16_t  slope2;
+	int16_t  fdo_min;
+	int16_t  hys_up;
+	int16_t  hys_down;
+	int16_t  hys_slope;
+	int16_t  temp_resp_lim;
+	int16_t  temp_curr;
+	int16_t  slope_curr;
+	int16_t  pwm_curr;
+	uint32_t refresh_period;
+	int16_t  fdo_max;
+	uint8_t  temp_src;
+	int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I  7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+    uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+    uint32_t   lkge_lut_V0;
+    uint32_t   lkge_lut_Vstep;
+    uint32_t   WinTime;
+    uint32_t   R_LL;
+    uint32_t   calculation_repeats;
+    uint32_t   l2numWin_TDP;
+    uint32_t   dc_cac;
+    uint8_t    lts_truncate_n;
+    uint8_t    SHIFT_N;
+    uint8_t    log2_PG_LKG_SCALE;
+    uint8_t    cac_temp;
+    uint32_t   lkge_lut_T0;
+    uint32_t   lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+    uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_SIslands_MCRegisterSet          data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  mc_arb_burst_time;
+    uint8_t  padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
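+
+/*
+ * Each freq[] dword packs FBDIV in bits 24:0 and PDIV in bits 31:25;
+ * each ss[] dword packs CLKV in bits 19:0 and CLKS in bits 31:20.
+ * Fields are recovered with the usual mask/shift pair, e.g.:
+ *
+ *   fbdiv = (freq & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
+ *           SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT;
+ */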
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+    uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t K;
+    uint32_t T0;
+    uint32_t MaxT;
+    uint8_t  WindowSize;
+    uint8_t  Tdep_count;
+    uint8_t  temp_select;
+    uint8_t  DTE_mode;
+    uint8_t  T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
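+/* Byte offsets of the table pointers within the firmware header image. */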
+#define SISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable                0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable            0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration          0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters            0x48
+
+#pragma pack(pop)
+
+int si_copy_bytes_to_smc(struct radeon_device *rdev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit);
+void si_start_smc(struct radeon_device *rdev);
+void si_reset_smc(struct radeon_device *rdev);
+int si_program_jump_on_start(struct radeon_device *rdev);
+void si_stop_smc_clock(struct radeon_device *rdev);
+void si_start_smc_clock(struct radeon_device *rdev);
+bool si_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev);
+int si_load_smc_ucode(struct radeon_device *rdev, u32 limit);
+int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			   u32 *value, u32 limit);
+int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+			    u32 value, u32 limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 0000000..75a380a1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_H
+#define SMU7_H
+
+#pragma pack(push, 1)
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU7_MAX_LEVELS_VDDC            8
+#define SMU7_MAX_LEVELS_VDDCI           4
+#define SMU7_MAX_LEVELS_MVDD            4
+#define SMU7_MAX_LEVELS_VDDNB           8
+
+#define SMU7_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE   // SCLK + SQ DPM + ULV
+#define SMU7_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS   // MCLK Levels DPM
+#define SMU7_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS  // LCLK Levels
+#define SMU7_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS  // PCIe speed and number of lanes.
+#define SMU7_MAX_LEVELS_UVD             8   // VCLK/DCLK levels for UVD.
+#define SMU7_MAX_LEVELS_VCE             8   // ECLK levels for VCE.
+#define SMU7_MAX_LEVELS_ACP             8   // ACLK levels for ACP.
+#define SMU7_MAX_LEVELS_SAMU            8   // SAMCLK levels for SAMU.
+#define SMU7_MAX_ENTRIES_SMIO           32  // Number of entries in SMIO table.
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
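+
+/*
+ * SCRATCH_B packs the target and current DPM level index of each block
+ * into consecutive 3-bit fields, so a single register read reports
+ * where every engine currently sits, e.g.:
+ *
+ *   curr_uvd = (scratch_b & SCRATCH_B_CURR_UVD_INDEX_MASK) >>
+ *              SCRATCH_B_CURR_UVD_INDEX_SHIFT;
+ */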
+
+
+struct SMU7_PIDController
+{
+    uint32_t Ki;
+    int32_t LFWindupUL;
+    int32_t LFWindupLL;
+    uint32_t StatePrecision;
+    uint32_t LfPrecision;
+    uint32_t LfOffset;
+    uint32_t MaxState;
+    uint32_t MaxLfFraction;
+    uint32_t StateShift;
+};
+
+typedef struct SMU7_PIDController SMU7_PIDController;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+struct SMU7_Firmware_Header
+{
+    uint32_t Digest[5];
+    uint32_t Version;
+    uint32_t HeaderSize;
+    uint32_t Flags;
+    uint32_t EntryPoint;
+    uint32_t CodeSize;
+    uint32_t ImageSize;
+
+    uint32_t Rtos;
+    uint32_t SoftRegisters;
+    uint32_t DpmTable;
+    uint32_t FanTable;
+    uint32_t CacConfigTable;
+    uint32_t CacStatusTable;
+
+    uint32_t mcRegisterTable;
+
+    uint32_t mcArbDramTimingTable;
+
+    uint32_t PmFuseTable;
+    uint32_t Globals;
+    uint32_t Reserved[42];
+    uint32_t Signature;
+};
+
+typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+    PowerDown = 1,
+    DP54x4,
+    DP54x2,
+    DP54x1,
+    DP27x4,
+    DP27x2,
+    DP27x1,
+    HDMI297,
+    HDMI162,
+    LVDS,
+    DP324x4,
+    DP324x2,
+    DP324x1
+};
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 0000000..0b0b404
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,514 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_DISCRETE_H
+#define SMU7_DISCRETE_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 3
+#define SMU7_DTE_SINKS 1
+#define SMU7_NUM_CPU_TES 0
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+struct SMU7_SoftRegisters
+{
+    uint32_t        RefClockFrequency;
+    uint32_t        PmTimerP;
+    uint32_t        FeatureEnables;
+    uint32_t        PreVBlankGap;
+    uint32_t        VBlankTimeout;
+    uint32_t        TrainTimeGap;
+
+    uint32_t        MvddSwitchTime;
+    uint32_t        LongestAcpiTrainTime;
+    uint32_t        AcpiDelay;
+    uint32_t        G5TrainTime;
+    uint32_t        DelayMpllPwron;
+    uint32_t        VoltageChangeTimeout;
+    uint32_t        HandshakeDisables;
+
+    uint8_t         DisplayPhy1Config;
+    uint8_t         DisplayPhy2Config;
+    uint8_t         DisplayPhy3Config;
+    uint8_t         DisplayPhy4Config;
+
+    uint8_t         DisplayPhy5Config;
+    uint8_t         DisplayPhy6Config;
+    uint8_t         DisplayPhy7Config;
+    uint8_t         DisplayPhy8Config;
+
+    uint32_t        AverageGraphicsA;
+    uint32_t        AverageMemoryA;
+    uint32_t        AverageGioA;
+
+    uint8_t         SClkDpmEnabledLevels;
+    uint8_t         MClkDpmEnabledLevels;
+    uint8_t         LClkDpmEnabledLevels;
+    uint8_t         PCIeDpmEnabledLevels;
+
+    uint8_t         UVDDpmEnabledLevels;
+    uint8_t         SAMUDpmEnabledLevels;
+    uint8_t         ACPDpmEnabledLevels;
+    uint8_t         VCEDpmEnabledLevels;
+
+    uint32_t        DRAM_LOG_ADDR_H;
+    uint32_t        DRAM_LOG_ADDR_L;
+    uint32_t        DRAM_LOG_PHY_ADDR_H;
+    uint32_t        DRAM_LOG_PHY_ADDR_L;
+    uint32_t        DRAM_LOG_BUFF_SIZE;
+    uint32_t        UlvEnterC;
+    uint32_t        UlvTime;
+    uint32_t        Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Discrete_VoltageLevel
+{
+    uint16_t    Voltage;
+    uint16_t    StdVoltageHiSidd;
+    uint16_t    StdVoltageLoSidd;
+    uint8_t     Smio;
+    uint8_t     padding;
+};
+
+typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
+
+struct SMU7_Discrete_GraphicsLevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+
+    uint32_t    SclkFrequency;
+
+    uint8_t     padding1[2];
+    uint16_t    ActivityLevel;
+
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     EnabledForActivity;
+    uint8_t     EnabledForThrottle;
+    uint8_t     UpH;
+    uint8_t     DownH;
+    uint8_t     VoltageDownH;
+    uint8_t     PowerThrottle;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding[3];
+};
+
+typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
+
+struct SMU7_Discrete_ACPILevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    SclkFrequency;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding;
+    uint32_t    CgSpllFuncCntl;
+    uint32_t    CgSpllFuncCntl2;
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+};
+
+typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
+
+struct SMU7_Discrete_Ulv
+{
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint16_t    VddcOffset;
+    uint8_t     VddcOffsetVid;
+    uint8_t     VddcPhase;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
+
+struct SMU7_Discrete_MemoryLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    MinVddci;
+    uint32_t    MinMvdd;
+
+    uint32_t    MclkFrequency;
+
+    uint8_t     EdcReadEnable;
+    uint8_t     EdcWriteEnable;
+    uint8_t     RttEnable;
+    uint8_t     StutterEnable;
+
+    uint8_t     StrobeEnable;
+    uint8_t     StrobeRatio;
+    uint8_t     EnabledForThrottle;
+    uint8_t     EnabledForActivity;
+
+    uint8_t     UpH;
+    uint8_t     DownH;
+    uint8_t     VoltageDownH;
+    uint8_t     padding;
+
+    uint16_t    ActivityLevel;
+    uint8_t     DisplayWatermark;
+    uint8_t     padding1;
+
+    uint32_t    MpllFuncCntl;
+    uint32_t    MpllFuncCntl_1;
+    uint32_t    MpllFuncCntl_2;
+    uint32_t    MpllAdFuncCntl;
+    uint32_t    MpllDqFuncCntl;
+    uint32_t    MclkPwrmgtCntl;
+    uint32_t    DllCntl;
+    uint32_t    MpllSs1;
+    uint32_t    MpllSs2;
+};
+
+typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
+
+struct SMU7_Discrete_LinkLevel
+{
+    uint8_t     PcieGenSpeed;
+    uint8_t     PcieLaneCount;
+    uint8_t     EnabledForActivity;
+    uint8_t     Padding;
+    uint32_t    DownT;
+    uint32_t    UpT;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
+
+
+struct SMU7_Discrete_MCArbDramTimingTableEntry
+{
+    uint32_t McArbDramTiming;
+    uint32_t McArbDramTiming2;
+    uint8_t  McArbBurstTime;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
+
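+/*
+ * Indexed [SCLK DPM state][MCLK DPM level], so the SMC can look up the
+ * DRAM timing for any engine/memory clock pairing once the driver has
+ * populated the table.
+ */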
+struct SMU7_Discrete_MCArbDramTimingTable
+{
+    SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
+
+struct SMU7_Discrete_UvdLevel
+{
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint16_t MinVddc;
+    uint8_t  MinVddcPhases;
+    uint8_t  VclkDivider;
+    uint8_t  DclkDivider;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
+
+struct SMU7_Discrete_ExtClkLevel
+{
+    uint32_t Frequency;
+    uint16_t MinVoltage;
+    uint8_t  MinPhases;
+    uint8_t  Divider;
+};
+
+typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
+
+struct SMU7_Discrete_StateInfo
+{
+    uint32_t SclkFrequency;
+    uint32_t MclkFrequency;
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint32_t SamclkFrequency;
+    uint32_t AclkFrequency;
+    uint32_t EclkFrequency;
+    uint16_t MvddVoltage;
+    uint16_t padding16;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+    uint8_t  McRegIndex;
+    uint8_t  SeqIndex;
+    uint8_t  SclkDid;
+    int8_t   SclkIndex;
+    int8_t   MclkIndex;
+    uint8_t  PCIeGen;
+
+};
+
+typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
+
+
+struct SMU7_Discrete_DpmTable
+{
+    SMU7_PIDController                  GraphicsPIDController;
+    SMU7_PIDController                  MemoryPIDController;
+    SMU7_PIDController                  LinkPIDController;
+
+    uint32_t                            SystemFlags;
+
+
+    uint32_t                            SmioMaskVddcVid;
+    uint32_t                            SmioMaskVddcPhase;
+    uint32_t                            SmioMaskVddciVid;
+    uint32_t                            SmioMaskMvddVid;
+
+    uint32_t                            VddcLevelCount;
+    uint32_t                            VddciLevelCount;
+    uint32_t                            MvddLevelCount;
+
+    SMU7_Discrete_VoltageLevel          VddcLevel               [SMU7_MAX_LEVELS_VDDC];
+//    SMU7_Discrete_VoltageLevel          VddcStandardReference   [SMU7_MAX_LEVELS_VDDC];
+    SMU7_Discrete_VoltageLevel          VddciLevel              [SMU7_MAX_LEVELS_VDDCI];
+    SMU7_Discrete_VoltageLevel          MvddLevel               [SMU7_MAX_LEVELS_MVDD];
+
+    uint8_t                             GraphicsDpmLevelCount;
+    uint8_t                             MemoryDpmLevelCount;
+    uint8_t                             LinkLevelCount;
+    uint8_t                             UvdLevelCount;
+    uint8_t                             VceLevelCount;
+    uint8_t                             AcpLevelCount;
+    uint8_t                             SamuLevelCount;
+    uint8_t                             MasterDeepSleepControl;
+    uint32_t                            Reserved[5];
+//    uint32_t                            SamuDefaultLevel;
+
+    SMU7_Discrete_GraphicsLevel         GraphicsLevel           [SMU7_MAX_LEVELS_GRAPHICS];
+    SMU7_Discrete_MemoryLevel           MemoryACPILevel;
+    SMU7_Discrete_MemoryLevel           MemoryLevel             [SMU7_MAX_LEVELS_MEMORY];
+    SMU7_Discrete_LinkLevel             LinkLevel               [SMU7_MAX_LEVELS_LINK];
+    SMU7_Discrete_ACPILevel             ACPILevel;
+    SMU7_Discrete_UvdLevel              UvdLevel                [SMU7_MAX_LEVELS_UVD];
+    SMU7_Discrete_ExtClkLevel           VceLevel                [SMU7_MAX_LEVELS_VCE];
+    SMU7_Discrete_ExtClkLevel           AcpLevel                [SMU7_MAX_LEVELS_ACP];
+    SMU7_Discrete_ExtClkLevel           SamuLevel               [SMU7_MAX_LEVELS_SAMU];
+    SMU7_Discrete_Ulv                   Ulv;
+
+    uint32_t                            SclkStepSize;
+    uint32_t                            Smio                    [SMU7_MAX_ENTRIES_SMIO];
+
+    uint8_t                             UvdBootLevel;
+    uint8_t                             VceBootLevel;
+    uint8_t                             AcpBootLevel;
+    uint8_t                             SamuBootLevel;
+
+    uint8_t                             UVDInterval;
+    uint8_t                             VCEInterval;
+    uint8_t                             ACPInterval;
+    uint8_t                             SAMUInterval;
+
+    uint8_t                             GraphicsBootLevel;
+    uint8_t                             GraphicsVoltageChangeEnable;
+    uint8_t                             GraphicsThermThrottleEnable;
+    uint8_t                             GraphicsInterval;
+
+    uint8_t                             VoltageInterval;
+    uint8_t                             ThermalInterval;
+    uint16_t                            TemperatureLimitHigh;
+
+    uint16_t                            TemperatureLimitLow;
+    uint8_t                             MemoryBootLevel;
+    uint8_t                             MemoryVoltageChangeEnable;
+
+    uint8_t                             MemoryInterval;
+    uint8_t                             MemoryThermThrottleEnable;
+    uint16_t                            VddcVddciDelta;
+
+    uint16_t                            VoltageResponseTime;
+    uint16_t                            PhaseResponseTime;
+
+    uint8_t                             PCIeBootLinkLevel;
+    uint8_t                             PCIeGenInterval;
+    uint8_t                             DTEInterval;
+    uint8_t                             DTEMode;
+
+    uint8_t                             SVI2Enable;
+    uint8_t                             VRHotGpio;
+    uint8_t                             AcDcGpio;
+    uint8_t                             ThermGpio;
+
+    uint16_t                            PPM_PkgPwrLimit;
+    uint16_t                            PPM_TemperatureLimit;
+
+    uint16_t                            DefaultTdp;
+    uint16_t                            TargetTdp;
+
+    uint16_t                            FpsHighT;
+    uint16_t                            FpsLowT;
+
+    uint16_t                            BAPMTI_R  [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+    uint16_t                            BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+
+    uint8_t                             DTEAmbientTempBase;
+    uint8_t                             DTETjOffset;
+    uint8_t                             GpuTjMax;
+    uint8_t                             GpuTjHyst;
+
+    uint16_t                            BootVddc;
+    uint16_t                            BootVddci;
+
+    uint16_t                            BootMVdd;
+    uint16_t                            padding;
+
+    uint32_t                            BAPM_TEMP_GRADIENT;
+
+    uint32_t                            LowSclkInterruptT;
+};
+
+typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
+
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
+
+struct SMU7_Discrete_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
+
+struct SMU7_Discrete_MCRegisterSet
+{
+    uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
+
+struct SMU7_Discrete_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMU7_Discrete_MCRegisterAddress     address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+    SMU7_Discrete_MCRegisterSet         data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+
+struct SMU7_Discrete_FanTable
+{
+	uint16_t FdoMode;
+	int16_t  TempMin;
+	int16_t  TempMed;
+	int16_t  TempMax;
+	int16_t  Slope1;
+	int16_t  Slope2;
+	int16_t  FdoMin;
+	int16_t  HystUp;
+	int16_t  HystDown;
+	int16_t  HystSlope;
+	int16_t  TempRespLim;
+	int16_t  TempCurr;
+	int16_t  SlopeCurr;
+	int16_t  PwmCurr;
+	uint32_t RefreshPeriod;
+	int16_t  FdoMax;
+	uint8_t  TempSrc;
+	int8_t   Padding;
+};
+
+typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
+
+
+struct SMU7_Discrete_PmFuses {
+  // dw0-dw1
+  uint8_t BapmVddCVidHiSidd[8];
+
+  // dw2-dw3
+  uint8_t BapmVddCVidLoSidd[8];
+
+  // dw4-dw5
+  uint8_t VddCVid[8];
+
+  // dw6
+  uint8_t SviLoadLineEn;
+  uint8_t SviLoadLineVddC;
+  uint8_t SviLoadLineTrimVddC;
+  uint8_t SviLoadLineOffsetVddC;
+
+  // dw7
+  uint16_t TDC_VDDC_PkgLimit;
+  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+  uint8_t TDC_MAWt;
+
+  // dw8
+  uint8_t TdcWaterfallCtl;
+  uint8_t LPMLTemperatureMin;
+  uint8_t LPMLTemperatureMax;
+  uint8_t Reserved;
+
+  // dw9-dw10
+  uint8_t BapmVddCVidHiSidd2[8];
+
+  // dw11-dw12
+  int16_t FuzzyFan_ErrorSetDelta;
+  int16_t FuzzyFan_ErrorRateSetDelta;
+  int16_t FuzzyFan_PwmSetDelta;
+  uint16_t CalcMeasPowerBlend;
+
+  // dw13-dw16
+  uint8_t GnbLPML[16];
+
+  // dw17
+  uint8_t GnbLPMLMaxVid;
+  uint8_t GnbLPMLMinVid;
+  uint8_t Reserved1[2];
+
+  // dw18
+  uint16_t BapmVddCBaseLeakageHiSidd;
+  uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
+
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 0000000..78ada9f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_FUSION_H
+#define SMU7_FUSION_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+// All 'soft registers' should be uint32_t.
+struct SMU7_SoftRegisters
+{
+    uint32_t        RefClockFrequency;
+    uint32_t        PmTimerP;
+    uint32_t        FeatureEnables;
+    uint32_t        HandshakeDisables;
+
+    uint8_t         DisplayPhy1Config;
+    uint8_t         DisplayPhy2Config;
+    uint8_t         DisplayPhy3Config;
+    uint8_t         DisplayPhy4Config;
+
+    uint8_t         DisplayPhy5Config;
+    uint8_t         DisplayPhy6Config;
+    uint8_t         DisplayPhy7Config;
+    uint8_t         DisplayPhy8Config;
+
+    uint32_t        AverageGraphicsA;
+    uint32_t        AverageMemoryA;
+    uint32_t        AverageGioA;
+
+    uint8_t         SClkDpmEnabledLevels;
+    uint8_t         MClkDpmEnabledLevels;
+    uint8_t         LClkDpmEnabledLevels;
+    uint8_t         PCIeDpmEnabledLevels;
+
+    uint8_t         UVDDpmEnabledLevels;
+    uint8_t         SAMUDpmEnabledLevels;
+    uint8_t         ACPDpmEnabledLevels;
+    uint8_t         VCEDpmEnabledLevels;
+
+    uint32_t        DRAM_LOG_ADDR_H;
+    uint32_t        DRAM_LOG_ADDR_L;
+    uint32_t        DRAM_LOG_PHY_ADDR_H;
+    uint32_t        DRAM_LOG_PHY_ADDR_L;
+    uint32_t        DRAM_LOG_BUFF_SIZE;
+    uint32_t        UlvEnterC;
+    uint32_t        UlvTime;
+    uint32_t        Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Fusion_GraphicsLevel
+{
+    uint32_t    MinVddNb;
+
+    uint32_t    SclkFrequency;
+
+    uint8_t     Vid;
+    uint8_t     VidOffset;
+    uint16_t    AT;
+
+    uint8_t     PowerThrottle;
+    uint8_t     GnbSlow;
+    uint8_t     ForceNbPs1;
+    uint8_t     SclkDid;
+
+    uint8_t     DisplayWatermark;
+    uint8_t     EnabledForActivity;
+    uint8_t     EnabledForThrottle;
+    uint8_t     UpH;
+
+    uint8_t     DownH;
+    uint8_t     VoltageDownH;
+    uint8_t     DeepSleepDivId;
+
+    uint8_t     ClkBypassCntl;
+
+    uint32_t    reserved;
+};
+
+typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
+
+struct SMU7_Fusion_GIOLevel
+{
+    uint8_t     EnabledForActivity;
+    uint8_t     LclkDid;
+    uint8_t     Vid;
+    uint8_t     VoltageDownH;
+
+    uint32_t    MinVddNb;
+
+    uint16_t    ResidencyCounter;
+    uint8_t     UpH;
+    uint8_t     DownH;
+
+    uint32_t    LclkFrequency;
+
+    uint8_t     ActivityLevel;
+    uint8_t     EnabledForThrottle;
+
+    uint8_t     ClkBypassCntl;
+
+    uint8_t     padding;
+};
+
+typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU7_Fusion_UvdLevel
+{
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint16_t MinVddNb;
+    uint8_t  VclkDivider;
+    uint8_t  DclkDivider;
+
+    uint8_t     VClkBypassCntl;
+    uint8_t     DClkBypassCntl;
+
+    uint8_t     padding[2];
+
+};
+
+typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU7_Fusion_ExtClkLevel
+{
+    uint32_t Frequency;
+    uint16_t MinVoltage;
+    uint8_t  Divider;
+    uint8_t  ClkBypassCntl;
+
+    uint32_t Reserved;
+};
+typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
+
+struct SMU7_Fusion_ACPILevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddNb;
+    uint32_t    SclkFrequency;
+    uint8_t     SclkDid;
+    uint8_t     GnbSlow;
+    uint8_t     ForceNbPs1;
+    uint8_t     DisplayWatermark;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding[3];
+};
+
+typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
+
+struct SMU7_Fusion_NbDpm
+{
+    uint8_t DpmXNbPsHi;
+    uint8_t DpmXNbPsLo;
+    uint8_t Dpm0PgNbPsHi;
+    uint8_t Dpm0PgNbPsLo;
+    uint8_t EnablePsi1;
+    uint8_t SkipDPM0;
+    uint8_t SkipPG;
+    uint8_t Hysteresis;
+    uint8_t EnableDpmPstatePoll;
+    uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
+
+struct SMU7_Fusion_StateInfo
+{
+    uint32_t SclkFrequency;
+    uint32_t LclkFrequency;
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint32_t SamclkFrequency;
+    uint32_t AclkFrequency;
+    uint32_t EclkFrequency;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+    int8_t   SclkIndex;
+    int8_t   MclkIndex;
+};
+
+typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
+
+struct SMU7_Fusion_DpmTable
+{
+    uint32_t                            SystemFlags;
+
+    SMU7_PIDController                  GraphicsPIDController;
+    SMU7_PIDController                  GioPIDController;
+
+    uint8_t                            GraphicsDpmLevelCount;
+    uint8_t                            GIOLevelCount;
+    uint8_t                            UvdLevelCount;
+    uint8_t                            VceLevelCount;
+
+    uint8_t                            AcpLevelCount;
+    uint8_t                            SamuLevelCount;
+    uint16_t                           FpsHighT;
+
+    SMU7_Fusion_GraphicsLevel         GraphicsLevel           [SMU__NUM_SCLK_DPM_STATE];
+    SMU7_Fusion_ACPILevel             ACPILevel;
+    SMU7_Fusion_UvdLevel              UvdLevel                [SMU7_MAX_LEVELS_UVD];
+    SMU7_Fusion_ExtClkLevel           VceLevel                [SMU7_MAX_LEVELS_VCE];
+    SMU7_Fusion_ExtClkLevel           AcpLevel                [SMU7_MAX_LEVELS_ACP];
+    SMU7_Fusion_ExtClkLevel           SamuLevel               [SMU7_MAX_LEVELS_SAMU];
+
+    uint8_t                           UvdBootLevel;
+    uint8_t                           VceBootLevel;
+    uint8_t                           AcpBootLevel;
+    uint8_t                           SamuBootLevel;
+    uint8_t                           UVDInterval;
+    uint8_t                           VCEInterval;
+    uint8_t                           ACPInterval;
+    uint8_t                           SAMUInterval;
+
+    uint8_t                           GraphicsBootLevel;
+    uint8_t                           GraphicsInterval;
+    uint8_t                           GraphicsThermThrottleEnable;
+    uint8_t                           GraphicsVoltageChangeEnable;
+
+    uint8_t                           GraphicsClkSlowEnable;
+    uint8_t                           GraphicsClkSlowDivider;
+    uint16_t                          FpsLowT;
+
+    uint32_t                          DisplayCac;
+    uint32_t                          LowSclkInterruptT;
+
+    uint32_t                          DRAM_LOG_ADDR_H;
+    uint32_t                          DRAM_LOG_ADDR_L;
+    uint32_t                          DRAM_LOG_PHY_ADDR_H;
+    uint32_t                          DRAM_LOG_PHY_ADDR_L;
+    uint32_t                          DRAM_LOG_BUFF_SIZE;
+
+};
+
+struct SMU7_Fusion_GIODpmTable
+{
+
+    SMU7_Fusion_GIOLevel              GIOLevel                [SMU7_MAX_LEVELS_GIO];
+
+    SMU7_PIDController                GioPIDController;
+
+    uint32_t                          GIOLevelCount;
+
+    uint8_t                           Enable;
+    uint8_t                           GIOVoltageChangeEnable;
+    uint8_t                           GIOBootLevel;
+    uint8_t                           padding;
+    uint8_t                           padding1[2];
+    uint8_t                           TargetState;
+    uint8_t                           CurrentState;
+    uint8_t                           ThrottleOnHtc;
+    uint8_t                           ThermThrottleStatus;
+    uint8_t                           ThermThrottleTempSelect;
+    uint8_t                           ThermThrottleEnable;
+    uint16_t                          TemperatureLimitHigh;
+    uint16_t                          TemperatureLimitLow;
+
+};
+
+typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
+typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
new file mode 100644
index 0000000..1e4975f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -0,0 +1,1946 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sumod.h"
+#include "r600_dpm.h"
+#include "cypress_dpm.h"
+#include "sumo_dpm.h"
+#include <linux/seq_file.h>
+
+#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define SUMO_MINIMUM_ENGINE_CLOCK 800
+#define BOOST_DPM_LEVEL 7
+
+static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
+{
+	SUMO_UTC_DFLT_00,
+	SUMO_UTC_DFLT_01,
+	SUMO_UTC_DFLT_02,
+	SUMO_UTC_DFLT_03,
+	SUMO_UTC_DFLT_04,
+	SUMO_UTC_DFLT_05,
+	SUMO_UTC_DFLT_06,
+	SUMO_UTC_DFLT_07,
+	SUMO_UTC_DFLT_08,
+	SUMO_UTC_DFLT_09,
+	SUMO_UTC_DFLT_10,
+	SUMO_UTC_DFLT_11,
+	SUMO_UTC_DFLT_12,
+	SUMO_UTC_DFLT_13,
+	SUMO_UTC_DFLT_14,
+};
+
+static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
+{
+	SUMO_DTC_DFLT_00,
+	SUMO_DTC_DFLT_01,
+	SUMO_DTC_DFLT_02,
+	SUMO_DTC_DFLT_03,
+	SUMO_DTC_DFLT_04,
+	SUMO_DTC_DFLT_05,
+	SUMO_DTC_DFLT_06,
+	SUMO_DTC_DFLT_07,
+	SUMO_DTC_DFLT_08,
+	SUMO_DTC_DFLT_09,
+	SUMO_DTC_DFLT_10,
+	SUMO_DTC_DFLT_11,
+	SUMO_DTC_DFLT_12,
+	SUMO_DTC_DFLT_13,
+	SUMO_DTC_DFLT_14,
+};
+
+static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
+{
+	struct sumo_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+	else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_ADDR_CONFIG); /* readback to post the writes */
+	}
+}
+
+#define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF
+#define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF
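+
+/*
+ * Bits set in these masks are the ones the driver owns: the enable path
+ * drives them to 0, the disable path drives them to 1, and bits outside
+ * the masks keep whatever value they already held.
+ */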
+
+static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
+{
+	u32 local0;
+	u32 local1;
+
+	local0 = RREG32(CG_CGTT_LOCAL_0);
+	local1 = RREG32(CG_CGTT_LOCAL_1);
+
+	if (enable) {
+		WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+		WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+	} else {
+		WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+		WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+	}
+}
+
+static void sumo_program_git(struct radeon_device *rdev)
+{
+	u32 p, u;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(SUMO_GICST_DFLT,
+			       xclk, 16, &p, &u);
+
+	WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
+}
+
+static void sumo_program_grsd(struct radeon_device *rdev)
+{
+	u32 p, u;
+	u32 xclk = radeon_get_xclk(rdev);
+	u32 grs = 256 * 25 / 100; /* 25% of 256 */
+
+	r600_calculate_u_and_p(1, xclk, 14, &p, &u);
+
+	WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
+}
+
+void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
+{
+	sumo_program_git(rdev);
+	sumo_program_grsd(rdev);
+}
+
+static void sumo_gfx_powergating_initialize(struct radeon_device *rdev)
+{
+	u32 rcu_pwr_gating_cntl;
+	u32 p, u;
+	u32 p_c, p_p, d_p;
+	u32 r_t, i_t;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	if (rdev->family == CHIP_PALM) {
+		p_c = 4;
+		d_p = 10;
+		r_t = 10;
+		i_t = 4;
+		p_p = 50 + 1000/200 + 6 * 32; /* = 247 */
+	} else {
+		p_c = 16;
+		d_p = 50;
+		r_t = 50;
+		i_t  = 50;
+		p_p = 113;
+	}
+
+	WREG32(CG_SCRATCH2, 0x01B60A17);
+
+	r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT,
+			       xclk, 16, &p, &u);
+
+	WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
+		 ~(PGP_MASK | PGU_MASK));
+
+	r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT,
+			       xclk, 16, &p, &u);
+
+	WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
+		 ~(PGP_MASK | PGU_MASK));
+
+	if (rdev->family == CHIP_PALM) {
+		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210);
+		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010);
+	} else {
+		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210);
+		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98);
+	}
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
+	rcu_pwr_gating_cntl &=
+		~(RSVD_MASK | PCV_MASK | PGS_MASK);
+	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN;
+	if (rdev->family == CHIP_PALM) {
+		rcu_pwr_gating_cntl &= ~PCP_MASK;
+		rcu_pwr_gating_cntl |= PCP(0x77);
+	}
+	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
+	rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
+	rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50);
+	WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
+	rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
+	rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50);
+	WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4);
+	rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK);
+	rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t);
+	WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl);
+
+	if (rdev->family == CHIP_PALM)
+		WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02);
+
+	sumo_smu_pg_init(rdev);
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
+	rcu_pwr_gating_cntl &=
+		~(RSVD_MASK | PCV_MASK | PGS_MASK);
+	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN;
+	if (rdev->family == CHIP_PALM) {
+		rcu_pwr_gating_cntl &= ~PCP_MASK;
+		rcu_pwr_gating_cntl |= PCP(0x77);
+	}
+	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
+
+	if (rdev->family == CHIP_PALM) {
+		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
+		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
+		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
+		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
+
+		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
+		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
+		rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50);
+		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
+	}
+
+	sumo_smu_pg_init(rdev);
+
+	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
+	rcu_pwr_gating_cntl &=
+		~(RSVD_MASK | PCV_MASK | PGS_MASK);
+	rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN;
+
+	if (rdev->family == CHIP_PALM) {
+		rcu_pwr_gating_cntl |= PCV(4);
+		rcu_pwr_gating_cntl &= ~PCP_MASK;
+		rcu_pwr_gating_cntl |= PCP(0x77);
+	} else
+		rcu_pwr_gating_cntl |= PCV(11);
+	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
+
+	if (rdev->family == CHIP_PALM) {
+		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
+		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
+		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
+		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
+
+		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
+		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
+		rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50);
+		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
+	}
+
+	sumo_smu_pg_init(rdev);
+}
+
+static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
+	else {
+		WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN);
+		RREG32(GB_ADDR_CONFIG); /* readback to post the write */
+	}
+}
+
+static int sumo_enable_clock_power_gating(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (pi->enable_gfx_clock_gating)
+		sumo_gfx_clockgating_initialize(rdev);
+	if (pi->enable_gfx_power_gating)
+		sumo_gfx_powergating_initialize(rdev);
+	if (pi->enable_mg_clock_gating)
+		sumo_mg_clockgating_enable(rdev, true);
+	if (pi->enable_gfx_clock_gating)
+		sumo_gfx_clockgating_enable(rdev, true);
+	if (pi->enable_gfx_power_gating)
+		sumo_gfx_powergating_enable(rdev, true);
+
+	return 0;
+}
+
+static void sumo_disable_clock_power_gating(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (pi->enable_gfx_clock_gating)
+		sumo_gfx_clockgating_enable(rdev, false);
+	if (pi->enable_gfx_power_gating)
+		sumo_gfx_powergating_enable(rdev, false);
+	if (pi->enable_mg_clock_gating)
+		sumo_mg_clockgating_enable(rdev, false);
+}
+
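+/*
+ * Compute the activity sampling parameters from the highest engine
+ * clock.  asi and pasi (presumably the normal and performance sampling
+ * intervals) are identical here; both are converted to the BSP/BSU
+ * register encoding and cached in pi->dsp and pi->psp.
+ */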
+static void sumo_calculate_bsp(struct radeon_device *rdev,
+			       u32 high_clk)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 xclk = radeon_get_xclk(rdev);
+
+	pi->pasi = 65535 * 100 / high_clk;
+	pi->asi = 65535 * 100 / high_clk;
+
+	r600_calculate_u_and_p(pi->asi,
+			       xclk, 16, &pi->bsp, &pi->bsu);
+
+	r600_calculate_u_and_p(pi->pasi,
+			       xclk, 16, &pi->pbsp, &pi->pbsu);
+
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+}
+
+static void sumo_init_bsp(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	WREG32(CG_BSP_0, pi->psp);
+}
+
+
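+/*
+ * Every level below the highest gets the default dsp sampling value;
+ * the highest level, and the boost level when the state has one, get
+ * the psp value derived from pasi instead.
+ */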
+static void sumo_program_bsp(struct radeon_device *rdev,
+			     struct radeon_ps *rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	u32 i;
+	u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;
+
+	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
+		highest_engine_clock = pi->boost_pl.sclk;
+
+	sumo_calculate_bsp(rdev, highest_engine_clock);
+
+	for (i = 0; i < ps->num_levels - 1; i++)
+		WREG32(CG_BSP_0 + (i * 4), pi->dsp);
+
+	WREG32(CG_BSP_0 + (i * 4), pi->psp);
+
+	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
+		WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp);
+}
+
+static void sumo_write_at(struct radeon_device *rdev,
+			  u32 index, u32 value)
+{
+	if (index == 0)
+		WREG32(CG_AT_0, value);
+	else if (index == 1)
+		WREG32(CG_AT_1, value);
+	else if (index == 2)
+		WREG32(CG_AT_2, value);
+	else if (index == 3)
+		WREG32(CG_AT_3, value);
+	else if (index == 4)
+		WREG32(CG_AT_4, value);
+	else if (index == 5)
+		WREG32(CG_AT_5, value);
+	else if (index == 6)
+		WREG32(CG_AT_6, value);
+	else if (index == 7)
+		WREG32(CG_AT_7, value);
+}
+
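+/*
+ * Program the per-level CG_AT activity thresholds: m_a scales the
+ * sampling interval by the level's engine clock, and the R/L values
+ * are the r[]/l[] percentages of that product.  The highest level (and
+ * the boost level, when present) uses pasi rather than asi.
+ */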
+static void sumo_program_at(struct radeon_device *rdev,
+			    struct radeon_ps *rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	u32 asi;
+	u32 i;
+	u32 m_a;
+	u32 a_t;
+	u32 r[SUMO_MAX_HARDWARE_POWERLEVELS];
+	u32 l[SUMO_MAX_HARDWARE_POWERLEVELS];
+
+	r[0] = SUMO_R_DFLT0;
+	r[1] = SUMO_R_DFLT1;
+	r[2] = SUMO_R_DFLT2;
+	r[3] = SUMO_R_DFLT3;
+	r[4] = SUMO_R_DFLT4;
+
+	l[0] = SUMO_L_DFLT0;
+	l[1] = SUMO_L_DFLT1;
+	l[2] = SUMO_L_DFLT2;
+	l[3] = SUMO_L_DFLT3;
+	l[4] = SUMO_L_DFLT4;
+
+	for (i = 0; i < ps->num_levels; i++) {
+		asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi;
+
+		m_a = asi * ps->levels[i].sclk / 100;
+
+		a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100);
+
+		sumo_write_at(rdev, i, a_t);
+	}
+
+	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
+		asi = pi->pasi;
+
+		m_a = asi * pi->boost_pl.sclk / 100;
+
+		a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) |
+			CG_L(m_a * l[ps->num_levels - 1] / 100);
+
+		sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t);
+	}
+}
+
+static void sumo_program_tp(struct radeon_device *rdev)
+{
+	int i;
+	enum r600_td td = R600_TD_DFLT;
+
+	for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) {
+		WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK);
+		WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK);
+	}
+
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
+{
+	WREG32(CG_FTV, vrc);
+}
+
+void sumo_clear_vc(struct radeon_device *rdev)
+{
+	WREG32(CG_FTV, 0);
+}
+
+void sumo_program_sstp(struct radeon_device *rdev)
+{
+	u32 p, u;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(SUMO_SST_DFLT,
+			       xclk, 16, &p, &u);
+
+	WREG32(CG_SSP, SSTU(u) | SST(p));
+}
+
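+/*
+ * Four SCLK fstate divider fields are packed into each CG_SCLK_DPM_CTRL
+ * dword, hence register index / 4 and field index % 4 below.
+ */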
+static void sumo_set_divider_value(struct radeon_device *rdev,
+				   u32 index, u32 divider)
+{
+	u32 reg_index = index / 4;
+	u32 field_index = index % 4;
+
+	if (field_index == 0)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK);
+	else if (field_index == 1)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK);
+	else if (field_index == 2)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK);
+	else if (field_index == 3)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK);
+}
+
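+/*
+ * The deep-sleep (and, below, ss) divider selects are 3-bit fields,
+ * one per power level, packed into a single control register each.
+ */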
+static void sumo_set_ds_dividers(struct radeon_device *rdev,
+				 u32 index, u32 divider)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (pi->enable_sclk_ds) {
+		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6);
+
+		dpm_ctrl &= ~(0x7 << (index * 3));
+		dpm_ctrl |= (divider << (index * 3));
+		WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl);
+	}
+}
+
+static void sumo_set_ss_dividers(struct radeon_device *rdev,
+				 u32 index, u32 divider)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (pi->enable_sclk_ds) {
+		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11);
+
+		dpm_ctrl &= ~(0x7 << (index * 3));
+		dpm_ctrl |= (divider << (index * 3));
+		WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl);
+	}
+}
+
+static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+	u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL);
+
+	voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2));
+	voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2));
+	WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl);
+}
+
+static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 temp = gnb_slow;
+	u32 cg_sclk_dpm_ctrl_3;
+
+	if (pi->driver_nbps_policy_disable)
+		temp = 1;
+
+	cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
+	cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index);
+	cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index));
+
+	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
+}
+
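+/*
+ * A level with a zero ss or ds divider index cannot deep-sleep, so the
+ * global ENABLE_DS bit is dropped for it; otherwise both dividers are
+ * programmed and deep sleep is (re)enabled.
+ */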
+static void sumo_program_power_level(struct radeon_device *rdev,
+				     struct sumo_pl *pl, u32 index)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	int ret;
+	struct atom_clock_dividers dividers;
+	u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     pl->sclk, false, &dividers);
+	if (ret)
+		return;
+
+	sumo_set_divider_value(rdev, index, dividers.post_div);
+
+	sumo_set_vid(rdev, index, pl->vddc_index);
+
+	if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
+		if (ds_en)
+			WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
+	} else {
+		sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
+		sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);
+
+		if (!ds_en)
+			WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS);
+	}
+
+	sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
+
+	if (pi->enable_boost)
+		sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
+}
+
+static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable)
+{
+	u32 reg_index = index / 4;
+	u32 field_index = index % 4;
+
+	if (field_index == 0)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD);
+	else if (field_index == 1)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD);
+	else if (field_index == 2)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD);
+	else if (field_index == 3)
+		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
+			 enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD);
+}
+
+static bool sumo_dpm_enabled(struct radeon_device *rdev)
+{
+	if (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE)
+		return true;
+	else
+		return false;
+}
+
+static void sumo_start_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE);
+}
+
+static void sumo_stop_dpm(struct radeon_device *rdev)
+{
+	WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE);
+}
+
+static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE_EN, ~FORCE_SCLK_STATE_EN);
+	else
+		WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_SCLK_STATE_EN);
+}
+
+static void sumo_set_forced_mode_enabled(struct radeon_device *rdev)
+{
+	int i;
+
+	sumo_set_forced_mode(rdev, true);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT)
+			break;
+		udelay(1);
+	}
+}
+
+static void sumo_wait_for_level_0(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0)
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void sumo_set_forced_mode_disabled(struct radeon_device *rdev)
+{
+	sumo_set_forced_mode(rdev, false);
+}
+
+static void sumo_enable_power_level_0(struct radeon_device *rdev)
+{
+	sumo_power_level_enable(rdev, 0, true);
+}
+
+static void sumo_patch_boost_state(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+
+	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
+		pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
+		pi->boost_pl.sclk = pi->sys_info.boost_sclk;
+		pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit;
+		pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost;
+	}
+}
+
+static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
+					     struct radeon_ps *new_rps,
+					     struct radeon_ps *old_rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
+	u32 nbps1_old = 0;
+	u32 nbps1_new = 0;
+
+	if (old_ps != NULL)
+		nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
+
+	nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
+
+	if (nbps1_old == 1 && nbps1_new == 0)
+		sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
+}
+
+static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
+					      struct radeon_ps *new_rps,
+					      struct radeon_ps *old_rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
+	u32 nbps1_old = 0;
+	u32 nbps1_new = 0;
+
+	if (old_ps != NULL)
+		nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
+
+	nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
+
+	if (nbps1_old == 0 && nbps1_new == 1)
+		sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
+}
+
+static void sumo_enable_boost(struct radeon_device *rdev,
+			      struct radeon_ps *rps,
+			      bool enable)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+
+	if (enable) {
+		if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
+			sumo_boost_state_enable(rdev, true);
+	} else
+		sumo_boost_state_enable(rdev, false);
+}
+
+static void sumo_set_forced_level(struct radeon_device *rdev, u32 index)
+{
+	WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK);
+}
+
+static void sumo_set_forced_level_0(struct radeon_device *rdev)
+{
+	sumo_set_forced_level(rdev, 0);
+}
+
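+/*
+ * The low byte of CG_SCLK_DPM_CTRL_4 is a per-level bitmask, presumably
+ * the wake level given the function name; only the highest enabled
+ * level is selected, plus the boost level when the state uses one.
+ */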
+static void sumo_program_wl(struct radeon_device *rdev,
+			    struct radeon_ps *rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
+
+	dpm_ctrl4 &= 0xFFFFFF00;
+	dpm_ctrl4 |= (1 << (new_ps->num_levels - 1));
+
+	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
+		dpm_ctrl4 |= (1 << BOOST_DPM_LEVEL);
+
+	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
+}
+
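+/* Write each level of the new state into the hardware and enable it, then
+ * disable any higher levels left over from the outgoing state.  A boost
+ * state additionally programs the dedicated BOOST_DPM_LEVEL slot.
+ */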
+static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev,
+					     struct radeon_ps *new_rps,
+					     struct radeon_ps *old_rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
+	u32 i;
+	u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
+
+	for (i = 0; i < new_ps->num_levels; i++) {
+		sumo_program_power_level(rdev, &new_ps->levels[i], i);
+		sumo_power_level_enable(rdev, i, true);
+	}
+
+	for (i = new_ps->num_levels; i < n_current_state_levels; i++)
+		sumo_power_level_enable(rdev, i, false);
+
+	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
+		sumo_program_power_level(rdev, &pi->boost_pl, BOOST_DPM_LEVEL);
+}
+
+static void sumo_enable_acpi_pm(struct radeon_device *rdev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+static void sumo_program_power_level_enter_state(struct radeon_device *rdev)
+{
+	WREG32_P(CG_SCLK_DPM_CTRL_5, SCLK_FSTATE_BOOTUP(0), ~SCLK_FSTATE_BOOTUP_MASK);
+}
+
+static void sumo_program_acpi_power_level(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     pi->acpi_pl.sclk,
+					     false, &dividers);
+	if (ret)
+		return;
+
+	WREG32_P(CG_ACPI_CNTL, SCLK_ACPI_DIV(dividers.post_div), ~SCLK_ACPI_DIV_MASK);
+	WREG32_P(CG_ACPI_VOLTAGE_CNTL, 0, ~ACPI_VOLTAGE_EN);
+}
+
+static void sumo_program_bootup_state(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
+	u32 i;
+
+	sumo_program_power_level(rdev, &pi->boot_pl, 0);
+
+	dpm_ctrl4 &= 0xFFFFFF00;
+	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
+
+	for (i = 1; i < 8; i++)
+		sumo_power_level_enable(rdev, i, false);
+}
+
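+/* UVD clock changes are done with gfx powergating temporarily disabled;
+ * it is re-enabled afterwards unless this chip has to keep gfx PG off
+ * while in a UVD state.
+ */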
+static void sumo_setup_uvd_clocks(struct radeon_device *rdev,
+				  struct radeon_ps *new_rps,
+				  struct radeon_ps *old_rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (pi->enable_gfx_power_gating) {
+		sumo_gfx_powergating_enable(rdev, false);
+	}
+
+	radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
+
+	if (pi->enable_gfx_power_gating) {
+		if (!pi->disable_gfx_power_gating_in_uvd ||
+		    !r600_is_uvd_state(new_rps->class, new_rps->class2))
+			sumo_gfx_powergating_enable(rdev, true);
+	}
+}
+
+static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+						    struct radeon_ps *new_rps,
+						    struct radeon_ps *old_rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+	struct sumo_ps *current_ps = sumo_get_ps(old_rps);
+
+	if ((new_rps->vclk == old_rps->vclk) &&
+	    (new_rps->dclk == old_rps->dclk))
+		return;
+
+	if (new_ps->levels[new_ps->num_levels - 1].sclk >=
+	    current_ps->levels[current_ps->num_levels - 1].sclk)
+		return;
+
+	sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
+}
+
+static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+						   struct radeon_ps *new_rps,
+						   struct radeon_ps *old_rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+	struct sumo_ps *current_ps = sumo_get_ps(old_rps);
+
+	if ((new_rps->vclk == old_rps->vclk) &&
+	    (new_rps->dclk == old_rps->dclk))
+		return;
+
+	if (new_ps->levels[new_ps->num_levels - 1].sclk <
+	    current_ps->levels[current_ps->num_levels - 1].sclk)
+		return;
+
+	sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
+}
+
+void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
+{
+	/* This bit selects who handles display phy powergating.
+	 * Clear the bit to let atom handle it.
+	 * Set it to let the driver handle it.
+	 * For now we just let atom handle it.
+	 */
+#if 0
+	u32 v = RREG32(DOUT_SCRATCH3);
+
+	if (enable)
+		v |= 0x4;
+	else
+		v &= 0xFFFFFFFB;
+
+	WREG32(DOUT_SCRATCH3, v);
+#endif
+}
+
+static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable)
+{
+	if (enable) {
+		u32 deep_sleep_cntl = RREG32(DEEP_SLEEP_CNTL);
+		u32 deep_sleep_cntl2 = RREG32(DEEP_SLEEP_CNTL2);
+		u32 t = 1;
+
+		deep_sleep_cntl &= ~R_DIS;
+		deep_sleep_cntl &= ~HS_MASK;
+		deep_sleep_cntl |= HS(t > 4095 ? 4095 : t);
+
+		deep_sleep_cntl2 |= LB_UFP_EN;
+		deep_sleep_cntl2 &= INOUT_C_MASK;
+		deep_sleep_cntl2 |= INOUT_C(0xf);
+
+		WREG32(DEEP_SLEEP_CNTL2, deep_sleep_cntl2);
+		WREG32(DEEP_SLEEP_CNTL, deep_sleep_cntl);
+	} else {
+		WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
+	}
+}
+
+static void sumo_program_bootup_at(struct radeon_device *rdev)
+{
+	WREG32_P(CG_AT_0, CG_R(0xffff), ~CG_R_MASK);
+	WREG32_P(CG_AT_0, CG_L(0), ~CG_L_MASK);
+}
+
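+/* Hold the sclk FIR in reset while the DPM parameters are set up;
+ * sumo_start_am() releases it again once programming is complete.
+ */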
+static void sumo_reset_am(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, FIR_RESET, ~FIR_RESET);
+}
+
+static void sumo_start_am(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_RESET);
+}
+
+static void sumo_program_ttp(struct radeon_device *rdev)
+{
+	u32 xclk = radeon_get_xclk(rdev);
+	u32 p, u;
+	u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5);
+
+	r600_calculate_u_and_p(1000, xclk, 16, &p, &u);
+
+	cg_sclk_dpm_ctrl_5 &= ~(TT_TP_MASK | TT_TU_MASK);
+	cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u);
+
+	WREG32(CG_SCLK_DPM_CTRL_5, cg_sclk_dpm_ctrl_5);
+}
+
+static void sumo_program_ttt(struct radeon_device *rdev)
+{
+	u32 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	cg_sclk_dpm_ctrl_3 &= ~(GNB_TT_MASK | GNB_THERMTHRO_MASK);
+	cg_sclk_dpm_ctrl_3 |= GNB_TT(pi->thermal_auto_throttling + 49);
+
+	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
+}
+
+static void sumo_enable_voltage_scaling(struct radeon_device *rdev, bool enable)
+{
+	if (enable) {
+		WREG32_P(CG_DPM_VOLTAGE_CNTL, DPM_VOLTAGE_EN, ~DPM_VOLTAGE_EN);
+		WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~CG_VOLTAGE_EN);
+	} else {
+		WREG32_P(CG_CG_VOLTAGE_CNTL, CG_VOLTAGE_EN, ~CG_VOLTAGE_EN);
+		WREG32_P(CG_DPM_VOLTAGE_CNTL, 0, ~DPM_VOLTAGE_EN);
+	}
+}
+
+static void sumo_override_cnb_thermal_events(struct radeon_device *rdev)
+{
+	WREG32_P(CG_SCLK_DPM_CTRL_3, CNB_THERMTHRO_MASK_SCLK,
+		 ~CNB_THERMTHRO_MASK_SCLK);
+}
+
+static void sumo_program_dc_hto(struct radeon_device *rdev)
+{
+	u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4);
+	u32 p, u;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(100000, xclk, 14, &p, &u);
+
+	cg_sclk_dpm_ctrl_4 &= ~(DC_HDC_MASK | DC_HU_MASK);
+	cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u);
+
+	WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4);
+}
+
+static void sumo_force_nbp_state(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+
+	if (!pi->driver_nbps_policy_disable) {
+		if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
+			WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_NB_PSTATE_1, ~FORCE_NB_PSTATE_1);
+		else
+			WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_NB_PSTATE_1);
+	}
+}
+
+u32 sumo_get_sleep_divider_from_id(u32 id)
+{
+	return 1 << id;
+}
+
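+/* The deep-sleep divider for a given id is 2^id, so pick the largest id
+ * whose divided clock still meets the floor: e.g. for sclk = 30000 and a
+ * floor of 10000 the loop settles on id = 1 (30000 / 2 = 15000).  Returns
+ * 0 when deep sleep is disabled or sclk is already below the floor.
+ */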
+u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+					 u32 sclk,
+					 u32 min_sclk_in_sr)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 i;
+	u32 temp;
+	u32 min = (min_sclk_in_sr > SUMO_MINIMUM_ENGINE_CLOCK) ?
+		min_sclk_in_sr : SUMO_MINIMUM_ENGINE_CLOCK;
+
+	if (sclk < min)
+		return 0;
+
+	if (!pi->enable_sclk_ds)
+		return 0;
+
+	for (i = SUMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+		temp = sclk / sumo_get_sleep_divider_from_id(i);
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return i;
+}
+
+static u32 sumo_get_valid_engine_clock(struct radeon_device *rdev,
+				       u32 lower_limit)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
+		if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
+			return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
+	}
+
+	return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency;
+}
+
+static void sumo_patch_thermal_state(struct radeon_device *rdev,
+				     struct sumo_ps *ps,
+				     struct sumo_ps *current_ps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
+	u32 current_vddc;
+	u32 current_sclk;
+	u32 current_index = 0;
+
+	if (current_ps) {
+		current_vddc = current_ps->levels[current_index].vddc_index;
+		current_sclk = current_ps->levels[current_index].sclk;
+	} else {
+		current_vddc = pi->boot_pl.vddc_index;
+		current_sclk = pi->boot_pl.sclk;
+	}
+
+	ps->levels[0].vddc_index = current_vddc;
+
+	if (ps->levels[0].sclk > current_sclk)
+		ps->levels[0].sclk = current_sclk;
+
+	ps->levels[0].ss_divider_index =
+		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);
+
+	ps->levels[0].ds_divider_index =
+		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, SUMO_MINIMUM_ENGINE_CLOCK);
+
+	if (ps->levels[0].ds_divider_index > ps->levels[0].ss_divider_index + 1)
+		ps->levels[0].ds_divider_index = ps->levels[0].ss_divider_index + 1;
+
+	if (ps->levels[0].ss_divider_index == ps->levels[0].ds_divider_index) {
+		if (ps->levels[0].ss_divider_index > 1)
+			ps->levels[0].ss_divider_index = ps->levels[0].ss_divider_index - 1;
+	}
+
+	if (ps->levels[0].ss_divider_index == 0)
+		ps->levels[0].ds_divider_index = 0;
+
+	if (ps->levels[0].ds_divider_index == 0)
+		ps->levels[0].ss_divider_index = 0;
+}
+
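+/* Sanitize a requested state before it is programmed: clamp voltage and
+ * sclk to the platform minimums, derive the deep-sleep divider indices,
+ * and decide per level whether the GNB may run slow (forced-NBPS1 states)
+ * or must stay fast (UVD/MVC states and the top performance level).
+ */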
+static void sumo_apply_state_adjust_rules(struct radeon_device *rdev,
+					  struct radeon_ps *new_rps,
+					  struct radeon_ps *old_rps)
+{
+	struct sumo_ps *ps = sumo_get_ps(new_rps);
+	struct sumo_ps *current_ps = sumo_get_ps(old_rps);
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 min_voltage = 0; /* ??? */
+	u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
+	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
+	u32 i;
+
+	if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+		return sumo_patch_thermal_state(rdev, ps, current_ps);
+
+	if (pi->enable_boost) {
+		if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
+			ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE;
+	}
+
+	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
+	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
+	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
+		ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE;
+
+	for (i = 0; i < ps->num_levels; i++) {
+		if (ps->levels[i].vddc_index < min_voltage)
+			ps->levels[i].vddc_index = min_voltage;
+
+		if (ps->levels[i].sclk < min_sclk)
+			ps->levels[i].sclk =
+				sumo_get_valid_engine_clock(rdev, min_sclk);
+
+		ps->levels[i].ss_divider_index =
+			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);
+
+		ps->levels[i].ds_divider_index =
+			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, SUMO_MINIMUM_ENGINE_CLOCK);
+
+		if (ps->levels[i].ds_divider_index > ps->levels[i].ss_divider_index + 1)
+			ps->levels[i].ds_divider_index = ps->levels[i].ss_divider_index + 1;
+
+		if (ps->levels[i].ss_divider_index == ps->levels[i].ds_divider_index) {
+			if (ps->levels[i].ss_divider_index > 1)
+				ps->levels[i].ss_divider_index = ps->levels[i].ss_divider_index - 1;
+		}
+
+		if (ps->levels[i].ss_divider_index == 0)
+			ps->levels[i].ds_divider_index = 0;
+
+		if (ps->levels[i].ds_divider_index == 0)
+			ps->levels[i].ss_divider_index = 0;
+
+		if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
+			ps->levels[i].allow_gnb_slow = 1;
+		else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
+			 (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
+			ps->levels[i].allow_gnb_slow = 0;
+		else if (i == ps->num_levels - 1)
+			ps->levels[i].allow_gnb_slow = 0;
+		else
+			ps->levels[i].allow_gnb_slow = 1;
+	}
+}
+
+static void sumo_cleanup_asic(struct radeon_device *rdev)
+{
+	sumo_take_smu_control(rdev, false);
+}
+
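+/* Clamp the requested range to the 0-255 C window the encoding supports
+ * and program the thermal interrupt thresholds; the register encoding
+ * carries a +49 offset on the degree value.
+ */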
+static int sumo_set_thermal_temperature_range(struct radeon_device *rdev,
+					      int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+static void sumo_update_current_ps(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	pi->current_rps = *rps;
+	pi->current_ps = *new_ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void sumo_update_requested_ps(struct radeon_device *rdev,
+				     struct radeon_ps *rps)
+{
+	struct sumo_ps *new_ps = sumo_get_ps(rps);
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	pi->requested_rps = *rps;
+	pi->requested_ps = *new_ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+int sumo_dpm_enable(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (sumo_dpm_enabled(rdev))
+		return -EINVAL;
+
+	sumo_program_bootup_state(rdev);
+	sumo_init_bsp(rdev);
+	sumo_reset_am(rdev);
+	sumo_program_tp(rdev);
+	sumo_program_bootup_at(rdev);
+	sumo_start_am(rdev);
+	if (pi->enable_auto_thermal_throttling) {
+		sumo_program_ttp(rdev);
+		sumo_program_ttt(rdev);
+	}
+	sumo_program_dc_hto(rdev);
+	sumo_program_power_level_enter_state(rdev);
+	sumo_enable_voltage_scaling(rdev, true);
+	sumo_program_sstp(rdev);
+	sumo_program_vc(rdev, SUMO_VRC_DFLT);
+	sumo_override_cnb_thermal_events(rdev);
+	sumo_start_dpm(rdev);
+	sumo_wait_for_level_0(rdev);
+	if (pi->enable_sclk_ds)
+		sumo_enable_sclk_ds(rdev, true);
+	if (pi->enable_boost)
+		sumo_enable_boost_timer(rdev);
+
+	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+int sumo_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = sumo_enable_clock_power_gating(rdev);
+	if (ret)
+		return ret;
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret)
+			return ret;
+		rdev->irq.dpm_thermal = true;
+		radeon_irq_set(rdev);
+	}
+
+	return 0;
+}
+
+void sumo_dpm_disable(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	if (!sumo_dpm_enabled(rdev))
+		return;
+	sumo_disable_clock_power_gating(rdev);
+	if (pi->enable_sclk_ds)
+		sumo_enable_sclk_ds(rdev, false);
+	sumo_clear_vc(rdev);
+	sumo_wait_for_level_0(rdev);
+	sumo_stop_dpm(rdev);
+	sumo_enable_voltage_scaling(rdev, false);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+int sumo_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	sumo_update_requested_ps(rdev, new_ps);
+
+	if (pi->enable_dynamic_patch_ps)
+		sumo_apply_state_adjust_rules(rdev,
+					      &pi->requested_rps,
+					      &pi->current_rps);
+
+	return 0;
+}
+
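+/* State-switch sequence: UVD clocks are reprogrammed on whichever side of
+ * the engine clock change keeps them valid, boost is disabled while the
+ * level set is rewritten at forced level 0, and forced mode is pulsed so
+ * the state machine latches the new programming before DPM resumes.
+ */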
+int sumo_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+	struct radeon_ps *old_ps = &pi->current_rps;
+
+	if (pi->enable_dpm)
+		sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+	if (pi->enable_boost) {
+		sumo_enable_boost(rdev, new_ps, false);
+		sumo_patch_boost_state(rdev, new_ps);
+	}
+	if (pi->enable_dpm) {
+		sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps);
+		sumo_enable_power_level_0(rdev);
+		sumo_set_forced_level_0(rdev);
+		sumo_set_forced_mode_enabled(rdev);
+		sumo_wait_for_level_0(rdev);
+		sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps);
+		sumo_program_wl(rdev, new_ps);
+		sumo_program_bsp(rdev, new_ps);
+		sumo_program_at(rdev, new_ps);
+		sumo_force_nbp_state(rdev, new_ps);
+		sumo_set_forced_mode_disabled(rdev);
+		sumo_set_forced_mode_enabled(rdev);
+		sumo_set_forced_mode_disabled(rdev);
+		sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps);
+	}
+	if (pi->enable_boost)
+		sumo_enable_boost(rdev, new_ps, true);
+	if (pi->enable_dpm)
+		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+
+	return 0;
+}
+
+void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+
+	sumo_update_current_ps(rdev, new_ps);
+}
+
+#if 0
+void sumo_dpm_reset_asic(struct radeon_device *rdev)
+{
+	sumo_program_bootup_state(rdev);
+	sumo_enable_power_level_0(rdev);
+	sumo_set_forced_level_0(rdev);
+	sumo_set_forced_mode_enabled(rdev);
+	sumo_wait_for_level_0(rdev);
+	sumo_set_forced_mode_disabled(rdev);
+	sumo_set_forced_mode_enabled(rdev);
+	sumo_set_forced_mode_disabled(rdev);
+}
+#endif
+
+void sumo_dpm_setup_asic(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	sumo_initialize_m3_arb(rdev);
+	pi->fw_version = sumo_get_running_fw_version(rdev);
+	DRM_INFO("Found smc ucode version: 0x%08x\n", pi->fw_version);
+	sumo_program_acpi_power_level(rdev);
+	sumo_enable_acpi_pm(rdev);
+	sumo_take_smu_control(rdev, true);
+}
+
+void sumo_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void sumo_patch_boot_state(struct radeon_device *rdev,
+				  struct sumo_ps *ps)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	ps->num_levels = 1;
+	ps->flags = 0;
+	ps->levels[0] = pi->boot_pl;
+}
+
+static void sumo_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					    struct radeon_ps *rps,
+					    struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					    u8 table_rev)
+{
+	struct sumo_ps *ps = sumo_get_ps(rps);
+
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.dpm.boot_ps = rps;
+		sumo_patch_boot_state(rdev, ps);
+	}
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void sumo_parse_pplib_clock_info(struct radeon_device *rdev,
+					struct radeon_ps *rps, int index,
+					union pplib_clock_info *clock_info)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	struct sumo_pl *pl = &ps->levels[index];
+	u32 sclk;
+
+	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+	pl->sclk = sclk;
+	pl->vddc_index = clock_info->sumo.vddcIndex;
+	pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit;
+
+	ps->num_levels = index + 1;
+
+	if (pi->enable_sclk_ds) {
+		pl->ds_divider_index = 5;
+		pl->ss_divider_index = 4;
+	}
+}
+
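+/* Walk the ATOM PPLib tables from the VBIOS: for each entry in the state
+ * array, resolve its non-clock info and up to SUMO_MAX_HARDWARE_POWERLEVELS
+ * clock info entries into a sumo_ps, and register boot/UVD states as they
+ * are found.
+ */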
+static int sumo_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct sumo_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info) {
+			/* don't leak the state array allocated above */
+			kfree(rdev->pm.dpm.ps);
+			return -EINVAL;
+		}
+		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(rdev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		rdev->pm.dpm.ps[i].ps_priv = ps;
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+				break;
+
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			sumo_parse_pplib_clock_info(rdev,
+						    &rdev->pm.dpm.ps[i], k,
+						    clock_info);
+			k++;
+		}
+		sumo_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+						non_clock_info,
+						non_clock_info_array->ucEntrySize);
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+	return 0;
+}
+
+u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
+			      struct sumo_vid_mapping_table *vid_mapping_table,
+			      u32 vid_2bit)
+{
+	u32 i;
+
+	for (i = 0; i < vid_mapping_table->num_entries; i++) {
+		if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+			return vid_mapping_table->entries[i].vid_7bit;
+	}
+
+	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+}
+
+#if 0
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+			      struct sumo_vid_mapping_table *vid_mapping_table,
+			      u32 vid_7bit)
+{
+	u32 i;
+
+	for (i = 0; i < vid_mapping_table->num_entries; i++) {
+		if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+			return vid_mapping_table->entries[i].vid_2bit;
+	}
+
+	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+}
+#endif
+
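+/* Decode a 7-bit VID to millivolts: VID 0 corresponds to 1550 mV and each
+ * step subtracts 12.5 mV, so the math is done in 0.1 mV units with +5 for
+ * rounding.  Codes above 0x7C are treated as 0 V.
+ */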
+static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
+					       u32 vid_2bit)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
+
+	if (vid_7bit > 0x7C)
+		return 0;
+
+	return (15500 - vid_7bit * 125 + 5) / 10;
+}
+
+static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev,
+							 struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table,
+							 ATOM_CLK_VOLT_CAPABILITY *table)
+{
+	u32 i;
+
+	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
+		if (table[i].ulMaximumSupportedCLK == 0)
+			break;
+
+		disp_clk_voltage_mapping_table->display_clock_frequency[i] =
+			table[i].ulMaximumSupportedCLK;
+	}
+
+	disp_clk_voltage_mapping_table->num_max_voltage_levels = i;
+
+	if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) {
+		disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000;
+		disp_clk_voltage_mapping_table->num_max_voltage_levels = 1;
+	}
+}
+
+void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
+					       struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
+					       ATOM_AVAILABLE_SCLK_LIST *table)
+{
+	u32 i;
+	u32 n = 0;
+	u32 prev_sclk = 0;
+
+	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+		if (table[i].ulSupportedSCLK > prev_sclk) {
+			sclk_voltage_mapping_table->entries[n].sclk_frequency =
+				table[i].ulSupportedSCLK;
+			sclk_voltage_mapping_table->entries[n].vid_2bit =
+				table[i].usVoltageIndex;
+			prev_sclk = table[i].ulSupportedSCLK;
+			n++;
+		}
+	}
+
+	sclk_voltage_mapping_table->num_max_dpm_entries = n;
+}
+
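+/* Build the 2-bit -> 7-bit VID table from the available-sclk list, then
+ * compact it in place: empty slots (vid_7bit == 0) are filled by moving
+ * later valid entries down, and num_entries ends up as the count of
+ * valid entries.
+ */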
+void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+				      struct sumo_vid_mapping_table *vid_mapping_table,
+				      ATOM_AVAILABLE_SCLK_LIST *table)
+{
+	u32 i, j;
+
+	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+		if (table[i].ulSupportedSCLK != 0) {
+			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+				table[i].usVoltageID;
+			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+				table[i].usVoltageIndex;
+		}
+	}
+
+	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
+		if (vid_mapping_table->entries[i].vid_7bit == 0) {
+			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
+				if (vid_mapping_table->entries[j].vid_7bit != 0) {
+					vid_mapping_table->entries[i] =
+						vid_mapping_table->entries[j];
+					vid_mapping_table->entries[j].vid_7bit = 0;
+					break;
+				}
+			}
+
+			if (j == SUMO_MAX_NUMBER_VOLTAGES)
+				break;
+		}
+	}
+
+	vid_mapping_table->num_entries = i;
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+};
+
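+/* Pull the platform limits out of the rev 6 IntegratedSystemInfo table:
+ * bootup/minimum clocks, thermal limits (defaulting to 203 and 5 when the
+ * table leaves them zero), the M3 arbiter parameter sets and the boost
+ * parameters, then derive the voltage mapping tables from it.
+ */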
+static int sumo_parse_sys_info_table(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+					      data_offset);
+
+		if (crev != 6) {
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			return -EINVAL;
+		}
+		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_6.ulBootUpEngineClock);
+		pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_6.ulMinEngineClock);
+		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_6.ulBootUpUMAClock);
+		pi->sys_info.bootup_nb_voltage_index =
+			le16_to_cpu(igp_info->info_6.usBootUpNBVoltage);
+		if (igp_info->info_6.ucHtcTmpLmt == 0)
+			pi->sys_info.htc_tmp_lmt = 203;
+		else
+			pi->sys_info.htc_tmp_lmt = igp_info->info_6.ucHtcTmpLmt;
+		if (igp_info->info_6.ucHtcHystLmt == 0)
+			pi->sys_info.htc_hyst_lmt = 5;
+		else
+			pi->sys_info.htc_hyst_lmt = igp_info->info_6.ucHtcHystLmt;
+		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+		}
+		for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++) {
+			pi->sys_info.csr_m3_arb_cntl_default[i] =
+				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_DEFAULT[i]);
+			pi->sys_info.csr_m3_arb_cntl_uvd[i] =
+				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_UVD[i]);
+			pi->sys_info.csr_m3_arb_cntl_fs3d[i] =
+				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_FS3D[i]);
+		}
+		pi->sys_info.sclk_dpm_boost_margin =
+			le32_to_cpu(igp_info->info_6.SclkDpmBoostMargin);
+		pi->sys_info.sclk_dpm_throttle_margin =
+			le32_to_cpu(igp_info->info_6.SclkDpmThrottleMargin);
+		pi->sys_info.sclk_dpm_tdp_limit_pg =
+			le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitPG);
+		pi->sys_info.gnb_tdp_limit = le16_to_cpu(igp_info->info_6.GnbTdpLimit);
+		pi->sys_info.sclk_dpm_tdp_limit_boost =
+			le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitBoost);
+		pi->sys_info.boost_sclk = le32_to_cpu(igp_info->info_6.ulBoostEngineCLock);
+		pi->sys_info.boost_vid_2bit = igp_info->info_6.ulBoostVid_2bit;
+		if (igp_info->info_6.EnableBoost)
+			pi->sys_info.enable_boost = true;
+		else
+			pi->sys_info.enable_boost = false;
+		sumo_construct_display_voltage_mapping_table(rdev,
+							     &pi->sys_info.disp_clk_voltage_mapping_table,
+							     igp_info->info_6.sDISPCLK_Voltage);
+		sumo_construct_sclk_voltage_mapping_table(rdev,
+							  &pi->sys_info.sclk_voltage_mapping_table,
+							  igp_info->info_6.sAvail_SCLK);
+		sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
+						 igp_info->info_6.sAvail_SCLK);
+
+	}
+	return 0;
+}
+
+static void sumo_construct_boot_and_acpi_state(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+	pi->boot_pl.ds_divider_index = 0;
+	pi->boot_pl.ss_divider_index = 0;
+	pi->boot_pl.allow_gnb_slow = 1;
+	pi->acpi_pl = pi->boot_pl;
+	pi->current_ps.num_levels = 1;
+	pi->current_ps.levels[0] = pi->boot_pl;
+}
+
+int sumo_dpm_init(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi;
+	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
+	int ret;
+
+	pi = kzalloc(sizeof(struct sumo_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	pi->driver_nbps_policy_disable = false;
+	if ((rdev->family == CHIP_PALM) && (hw_rev < 3))
+		pi->disable_gfx_power_gating_in_uvd = true;
+	else
+		pi->disable_gfx_power_gating_in_uvd = false;
+	pi->enable_alt_vddnb = true;
+	pi->enable_sclk_ds = true;
+	pi->enable_dynamic_m3_arbiter = false;
+	pi->enable_dynamic_patch_ps = true;
+	/* Some PALM chips don't seem to properly ungate gfx when UVD is in use;
+	 * for now just disable gfx PG.
+	 */
+	if (rdev->family == CHIP_PALM)
+		pi->enable_gfx_power_gating = false;
+	else
+		pi->enable_gfx_power_gating = true;
+	pi->enable_gfx_clock_gating = true;
+	pi->enable_mg_clock_gating = true;
+	pi->enable_auto_thermal_throttling = true;
+
+	ret = sumo_parse_sys_info_table(rdev);
+	if (ret)
+		return ret;
+
+	sumo_construct_boot_and_acpi_state(rdev);
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = sumo_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->asi = RV770_ASI_DFLT;
+	pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
+	pi->enable_boost = pi->sys_info.enable_boost;
+	pi->enable_dpm = true;
+
+	return 0;
+}
+
+void sumo_dpm_print_power_state(struct radeon_device *rdev,
+				struct radeon_ps *rps)
+{
+	int i;
+	struct sumo_ps *ps = sumo_get_ps(rps);
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->num_levels; i++) {
+		struct sumo_pl *pl = &ps->levels[i];
+		printk("\t\tpower level %d    sclk: %u vddc: %u\n",
+		       i, pl->sclk,
+		       sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						      struct seq_file *m)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	struct sumo_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
+		CURR_INDEX_SHIFT;
+
+	if (current_index == BOOST_DPM_LEVEL) {
+		pl = &pi->boost_pl;
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+			   current_index, pl->sclk,
+			   sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
+	} else if (current_index >= ps->num_levels) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		pl = &ps->levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+			   current_index, pl->sclk,
+			   sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
+	}
+}
+
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	struct sumo_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
+		CURR_INDEX_SHIFT;
+
+	if (current_index == BOOST_DPM_LEVEL) {
+		pl = &pi->boost_pl;
+		return pl->sclk;
+	} else if (current_index >= ps->num_levels) {
+		return 0;
+	} else {
+		pl = &ps->levels[current_index];
+		return pl->sclk;
+	}
+}
+
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
+void sumo_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	sumo_cleanup_asic(rdev); /* ??? */
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+}
+
+u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->levels[0].sclk;
+	else
+		return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
+int sumo_dpm_force_performance_level(struct radeon_device *rdev,
+				     enum radeon_dpm_forced_level level)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct sumo_ps *ps = sumo_get_ps(rps);
+	int i;
+
+	if (ps->num_levels <= 1)
+		return 0;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (pi->enable_boost)
+			sumo_enable_boost(rdev, rps, false);
+		sumo_power_level_enable(rdev, ps->num_levels - 1, true);
+		sumo_set_forced_level(rdev, ps->num_levels - 1);
+		sumo_set_forced_mode_enabled(rdev);
+		for (i = 0; i < ps->num_levels - 1; i++) {
+			sumo_power_level_enable(rdev, i, false);
+		}
+		sumo_set_forced_mode(rdev, false);
+		sumo_set_forced_mode_enabled(rdev);
+		sumo_set_forced_mode(rdev, false);
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if (pi->enable_boost)
+			sumo_enable_boost(rdev, rps, false);
+		sumo_power_level_enable(rdev, 0, true);
+		sumo_set_forced_level(rdev, 0);
+		sumo_set_forced_mode_enabled(rdev);
+		for (i = 1; i < ps->num_levels; i++) {
+			sumo_power_level_enable(rdev, i, false);
+		}
+		sumo_set_forced_mode(rdev, false);
+		sumo_set_forced_mode_enabled(rdev);
+		sumo_set_forced_mode(rdev, false);
+	} else {
+		for (i = 0; i < ps->num_levels; i++) {
+			sumo_power_level_enable(rdev, i, true);
+		}
+		if (pi->enable_boost)
+			sumo_enable_boost(rdev, rps, true);
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
new file mode 100644
index 0000000..07dda29
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SUMO_DPM_H__
+#define __SUMO_DPM_H__
+
+#include "atom.h"
+
+#define SUMO_MAX_HARDWARE_POWERLEVELS 5
+#define SUMO_PM_NUMBER_OF_TC 15
+
+struct sumo_pl {
+	u32 sclk;
+	u32 vddc_index;
+	u32 ds_divider_index;
+	u32 ss_divider_index;
+	u32 allow_gnb_slow;
+	u32 sclk_dpm_tdp_limit;
+};
+
+/* used for the flags field */
+#define SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE (1 << 0)
+#define SUMO_POWERSTATE_FLAGS_BOOST_STATE       (1 << 1)
+
+struct sumo_ps {
+	struct sumo_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+	u32 num_levels;
+	/* flags */
+	u32 flags;
+};
+
+#define NUMBER_OF_M3ARB_PARAM_SETS 10
+#define SUMO_MAX_NUMBER_VOLTAGES    4
+
+struct sumo_disp_clock_voltage_mapping_table {
+	u32 num_max_voltage_levels;
+	u32 display_clock_frequency[SUMO_MAX_NUMBER_VOLTAGES];
+};
+
+struct sumo_vid_mapping_entry {
+	u16 vid_2bit;
+	u16 vid_7bit;
+};
+
+struct sumo_vid_mapping_table {
+	u32 num_entries;
+	struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
+};
+
+struct sumo_sclk_voltage_mapping_entry {
+	u32 sclk_frequency;
+	u16 vid_2bit;
+	u16 rsv;
+};
+
+struct sumo_sclk_voltage_mapping_table {
+	u32 num_max_dpm_entries;
+	struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct sumo_sys_info {
+	u32 bootup_sclk;
+	u32 min_sclk;
+	u32 bootup_uma_clk;
+	u16 bootup_nb_voltage_index;
+	u8 htc_tmp_lmt;
+	u8 htc_hyst_lmt;
+	struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+	struct sumo_disp_clock_voltage_mapping_table disp_clk_voltage_mapping_table;
+	struct sumo_vid_mapping_table vid_mapping_table;
+	u32 csr_m3_arb_cntl_default[NUMBER_OF_M3ARB_PARAM_SETS];
+	u32 csr_m3_arb_cntl_uvd[NUMBER_OF_M3ARB_PARAM_SETS];
+	u32 csr_m3_arb_cntl_fs3d[NUMBER_OF_M3ARB_PARAM_SETS];
+	u32 sclk_dpm_boost_margin;
+	u32 sclk_dpm_throttle_margin;
+	u32 sclk_dpm_tdp_limit_pg;
+	u32 gnb_tdp_limit;
+	u32 sclk_dpm_tdp_limit_boost;
+	u32 boost_sclk;
+	u32 boost_vid_2bit;
+	bool enable_boost;
+};
+
+struct sumo_power_info {
+	u32 asi;
+	u32 pasi;
+	u32 bsp;
+	u32 bsu;
+	u32 pbsp;
+	u32 pbsu;
+	u32 dsp;
+	u32 psp;
+	u32 thermal_auto_throttling;
+	u32 uvd_m3_arbiter;
+	u32 fw_version;
+	struct sumo_sys_info sys_info;
+	struct sumo_pl acpi_pl;
+	struct sumo_pl boot_pl;
+	struct sumo_pl boost_pl;
+	bool disable_gfx_power_gating_in_uvd;
+	bool driver_nbps_policy_disable;
+	bool enable_alt_vddnb;
+	bool enable_dynamic_m3_arbiter;
+	bool enable_gfx_clock_gating;
+	bool enable_gfx_power_gating;
+	bool enable_mg_clock_gating;
+	bool enable_sclk_ds;
+	bool enable_auto_thermal_throttling;
+	bool enable_dynamic_patch_ps;
+	bool enable_dpm;
+	bool enable_boost;
+	struct radeon_ps current_rps;
+	struct sumo_ps current_ps;
+	struct radeon_ps requested_rps;
+	struct sumo_ps requested_ps;
+};
+
+#define SUMO_UTC_DFLT_00                     0x48
+#define SUMO_UTC_DFLT_01                     0x44
+#define SUMO_UTC_DFLT_02                     0x44
+#define SUMO_UTC_DFLT_03                     0x44
+#define SUMO_UTC_DFLT_04                     0x44
+#define SUMO_UTC_DFLT_05                     0x44
+#define SUMO_UTC_DFLT_06                     0x44
+#define SUMO_UTC_DFLT_07                     0x44
+#define SUMO_UTC_DFLT_08                     0x44
+#define SUMO_UTC_DFLT_09                     0x44
+#define SUMO_UTC_DFLT_10                     0x44
+#define SUMO_UTC_DFLT_11                     0x44
+#define SUMO_UTC_DFLT_12                     0x44
+#define SUMO_UTC_DFLT_13                     0x44
+#define SUMO_UTC_DFLT_14                     0x44
+
+#define SUMO_DTC_DFLT_00                     0x48
+#define SUMO_DTC_DFLT_01                     0x44
+#define SUMO_DTC_DFLT_02                     0x44
+#define SUMO_DTC_DFLT_03                     0x44
+#define SUMO_DTC_DFLT_04                     0x44
+#define SUMO_DTC_DFLT_05                     0x44
+#define SUMO_DTC_DFLT_06                     0x44
+#define SUMO_DTC_DFLT_07                     0x44
+#define SUMO_DTC_DFLT_08                     0x44
+#define SUMO_DTC_DFLT_09                     0x44
+#define SUMO_DTC_DFLT_10                     0x44
+#define SUMO_DTC_DFLT_11                     0x44
+#define SUMO_DTC_DFLT_12                     0x44
+#define SUMO_DTC_DFLT_13                     0x44
+#define SUMO_DTC_DFLT_14                     0x44
+
+#define SUMO_AH_DFLT               5
+
+#define SUMO_R_DFLT0               70
+#define SUMO_R_DFLT1               70
+#define SUMO_R_DFLT2               70
+#define SUMO_R_DFLT3               70
+#define SUMO_R_DFLT4               100
+
+#define SUMO_L_DFLT0               0
+#define SUMO_L_DFLT1               20
+#define SUMO_L_DFLT2               20
+#define SUMO_L_DFLT3               20
+#define SUMO_L_DFLT4               20
+#define SUMO_VRC_DFLT              0x30033
+#define SUMO_MGCGTTLOCAL0_DFLT     0
+#define SUMO_MGCGTTLOCAL1_DFLT     0
+#define SUMO_GICST_DFLT            19
+#define SUMO_SST_DFLT              8
+#define SUMO_VOLTAGEDROPT_DFLT     1
+#define SUMO_GFXPOWERGATINGT_DFLT  100
+
+/* sumo_dpm.c */
+void sumo_gfx_clockgating_initialize(struct radeon_device *rdev);
+void sumo_program_vc(struct radeon_device *rdev, u32 vrc);
+void sumo_clear_vc(struct radeon_device *rdev);
+void sumo_program_sstp(struct radeon_device *rdev);
+void sumo_take_smu_control(struct radeon_device *rdev, bool enable);
+void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
+					       struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
+					       ATOM_AVAILABLE_SCLK_LIST *table);
+void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+				      struct sumo_vid_mapping_table *vid_mapping_table,
+				      ATOM_AVAILABLE_SCLK_LIST *table);
+u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
+			      struct sumo_vid_mapping_table *vid_mapping_table,
+			      u32 vid_2bit);
+u32 sumo_get_sleep_divider_from_id(u32 id);
+u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+					 u32 sclk,
+					 u32 min_sclk_in_sr);
+
+/* sumo_smc.c */
+void sumo_initialize_m3_arb(struct radeon_device *rdev);
+void sumo_smu_pg_init(struct radeon_device *rdev);
+void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit);
+void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
+				      bool powersaving, bool force_nbps1);
+void sumo_boost_state_enable(struct radeon_device *rdev, bool enable);
+void sumo_enable_boost_timer(struct radeon_device *rdev);
+u32 sumo_get_running_fw_version(struct radeon_device *rdev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
new file mode 100644
index 0000000..cc051be
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "sumod.h"
+#include "sumo_dpm.h"
+#include "ppsmc.h"
+
+#define SUMO_SMU_SERVICE_ROUTINE_PG_INIT           1
+#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY   27
+#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20     20
+
+struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
+
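+/* Handshake with the SMU: wait for any previous request to finish
+ * (INT_DONE), raise INT_REQ with the service routine index, wait for the
+ * request to latch and be acknowledged, then drop INT_REQ again.  Every
+ * wait is bounded by rdev->usec_timeout.
+ */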
+static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
+{
+	u32 gfx_int_req;
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(GFX_INT_STATUS) & INT_DONE)
+			break;
+		udelay(1);
+	}
+
+	gfx_int_req = SERV_INDEX(id) | INT_REQ;
+	WREG32(GFX_INT_REQ, gfx_int_req);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(GFX_INT_REQ) & INT_REQ)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(GFX_INT_STATUS) & INT_ACK)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(GFX_INT_STATUS) & INT_DONE)
+			break;
+		udelay(1);
+	}
+
+	gfx_int_req &= ~INT_REQ;
+	WREG32(GFX_INT_REQ, gfx_int_req);
+}
+
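+/* Upload the three banks of memory arbiter parameter sets (default, UVD,
+ * full-screen 3D), NUMBER_OF_M3ARB_PARAM_SETS entries apiece, into
+ * consecutive MCU_M3ARB_PARAMS slots.
+ */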
+void sumo_initialize_m3_arb(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 i;
+
+	if (!pi->enable_dynamic_m3_arbiter)
+		return;
+
+	for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++)
+		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
+			   pi->sys_info.csr_m3_arb_cntl_default[i]);
+
+	for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 2; i++)
+		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
+			   pi->sys_info.csr_m3_arb_cntl_uvd[i % NUMBER_OF_M3ARB_PARAM_SETS]);
+
+	for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 3; i++)
+		WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
+			   pi->sys_info.csr_m3_arb_cntl_fs3d[i % NUMBER_OF_M3ARB_PARAM_SETS]);
+}
+
+static bool sumo_is_alt_vddnb_supported(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	bool return_code = false;
+
+	if (!pi->enable_alt_vddnb)
+		return return_code;
+
+	if ((rdev->family == CHIP_SUMO) || (rdev->family == CHIP_SUMO2)) {
+		if (pi->fw_version >= 0x00010C00)
+			return_code = true;
+	}
+
+	return return_code;
+}
+
+void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
+				      bool powersaving, bool force_nbps1)
+{
+	u32 param = 0;
+
+	if (!sumo_is_alt_vddnb_supported(rdev))
+		return;
+
+	if (powersaving)
+		param |= 1;
+
+	if (force_nbps1)
+		param |= 2;
+
+	WREG32_RCU(RCU_ALTVDDNB_NOTIFY, param);
+
+	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY);
+}
+
+void sumo_smu_pg_init(struct radeon_device *rdev)
+{
+	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_PG_INIT);
+}
+
+static u32 sumo_power_of_4(u32 unit)
+{
+	u32 ret = 1;
+	u32 i;
+
+	for (i = 0; i < unit; i++)
+		ret *= 4;
+
+	return ret;
+}
+
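+/* Program the GNB power-report timer: the prescaler unit read back from
+ * RCU_LCLK_SCALING_CNTL divides the clock by 4^unit, and the computed
+ * period is packed into bits 31:16 of the timer register with the unit in
+ * bits 7:4.  The boost/throttle margins and TDP limits are written before
+ * SMU service routine 20 is kicked to pick the settings up.
+ */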
+void sumo_enable_boost_timer(struct radeon_device *rdev)
+{
+	struct sumo_power_info *pi = sumo_get_pi(rdev);
+	u32 period, unit, timer_value;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	unit = (RREG32_RCU(RCU_LCLK_SCALING_CNTL) & LCLK_SCALING_TIMER_PRESCALER_MASK)
+		>> LCLK_SCALING_TIMER_PRESCALER_SHIFT;
+
+	period = 100 * (xclk / 100 / sumo_power_of_4(unit));
+
+	timer_value = (period << 16) | (unit << 4);
+
+	WREG32_RCU(RCU_GNB_PWR_REP_TIMER_CNTL, timer_value);
+	WREG32_RCU(RCU_BOOST_MARGIN, pi->sys_info.sclk_dpm_boost_margin);
+	WREG32_RCU(RCU_THROTTLE_MARGIN, pi->sys_info.sclk_dpm_throttle_margin);
+	WREG32_RCU(GNB_TDP_LIMIT, pi->sys_info.gnb_tdp_limit);
+	WREG32_RCU(RCU_SclkDpmTdpLimitPG, pi->sys_info.sclk_dpm_tdp_limit_pg);
+
+	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20);
+}
+
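+/* Each RCU_SclkDpmTdpLimit* register packs two 12-bit limits (mask 0xFFF),
+ * one per 16-bit half.  Only indices 0-4 and 7 (the boost level) map to a
+ * register half; any other index leaves regoffset at 0, so callers must
+ * not pass one.
+ */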
+void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit)
+{
+	u32 regoffset = 0;
+	u32 shift = 0;
+	u32 mask = 0xFFF;
+	u32 sclk_dpm_tdp_limit;
+
+	switch (index) {
+	case 0:
+		regoffset = RCU_SclkDpmTdpLimit01;
+		shift = 16;
+		break;
+	case 1:
+		regoffset = RCU_SclkDpmTdpLimit01;
+		shift = 0;
+		break;
+	case 2:
+		regoffset = RCU_SclkDpmTdpLimit23;
+		shift = 16;
+		break;
+	case 3:
+		regoffset = RCU_SclkDpmTdpLimit23;
+		shift = 0;
+		break;
+	case 4:
+		regoffset = RCU_SclkDpmTdpLimit47;
+		shift = 16;
+		break;
+	case 7:
+		regoffset = RCU_SclkDpmTdpLimit47;
+		shift = 0;
+		break;
+	default:
+		break;
+	}
+
+	sclk_dpm_tdp_limit = RREG32_RCU(regoffset);
+	sclk_dpm_tdp_limit &= ~(mask << shift);
+	sclk_dpm_tdp_limit |= (tdp_limit << shift);
+	WREG32_RCU(regoffset, sclk_dpm_tdp_limit);
+}
+
+void sumo_boost_state_enable(struct radeon_device *rdev, bool enable)
+{
+	u32 boost_disable = RREG32_RCU(RCU_GPU_BOOST_DISABLE);
+
+	boost_disable &= 0xFFFFFFFE;
+	boost_disable |= (enable ? 0 : 1);
+	WREG32_RCU(RCU_GPU_BOOST_DISABLE, boost_disable);
+}
+
+u32 sumo_get_running_fw_version(struct radeon_device *rdev)
+{
+	return RREG32_RCU(RCU_FW_VERSION);
+}
diff --git a/drivers/gpu/drm/radeon/sumod.h b/drivers/gpu/drm/radeon/sumod.h
new file mode 100644
index 0000000..7c9c2d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumod.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef _SUMOD_H_
+#define _SUMOD_H_
+
+/* pm registers */
+
+/* rcu */
+#define RCU_FW_VERSION                                  0x30c
+
+#define RCU_PWR_GATING_SEQ0                             0x408
+#define RCU_PWR_GATING_SEQ1                             0x40c
+#define RCU_PWR_GATING_CNTL                             0x410
+#       define PWR_GATING_EN                            (1 << 0)
+#       define RSVD_MASK                                (0x3 << 1)
+#       define PCV(x)                                   ((x) << 3)
+#       define PCV_MASK                                 (0x1f << 3)
+#       define PCV_SHIFT                                3
+#       define PCP(x)                                   ((x) << 8)
+#       define PCP_MASK                                 (0xf << 8)
+#       define PCP_SHIFT                                8
+#       define RPW(x)                                   ((x) << 16)
+#       define RPW_MASK                                 (0xf << 16)
+#       define RPW_SHIFT                                16
+#       define ID(x)                                    ((x) << 24)
+#       define ID_MASK                                  (0xf << 24)
+#       define ID_SHIFT                                 24
+#       define PGS(x)                                   ((x) << 28)
+#       define PGS_MASK                                 (0xf << 28)
+#       define PGS_SHIFT                                28
+
+#define RCU_ALTVDDNB_NOTIFY                             0x430
+#define RCU_LCLK_SCALING_CNTL                           0x434
+#       define LCLK_SCALING_EN                          (1 << 0)
+#       define LCLK_SCALING_TYPE                        (1 << 1)
+#       define LCLK_SCALING_TIMER_PRESCALER(x)          ((x) << 4)
+#       define LCLK_SCALING_TIMER_PRESCALER_MASK        (0xf << 4)
+#       define LCLK_SCALING_TIMER_PRESCALER_SHIFT       4
+#       define LCLK_SCALING_TIMER_PERIOD(x)             ((x) << 16)
+#       define LCLK_SCALING_TIMER_PERIOD_MASK           (0xf << 16)
+#       define LCLK_SCALING_TIMER_PERIOD_SHIFT          16
+
+#define RCU_PWR_GATING_CNTL_2                           0x4a0
+#       define MPPU(x)                                  ((x) << 0)
+#       define MPPU_MASK                                (0xffff << 0)
+#       define MPPU_SHIFT                               0
+#       define MPPD(x)                                  ((x) << 16)
+#       define MPPD_MASK                                (0xffff << 16)
+#       define MPPD_SHIFT                               16
+#define RCU_PWR_GATING_CNTL_3                           0x4a4
+#       define DPPU(x)                                  ((x) << 0)
+#       define DPPU_MASK                                (0xffff << 0)
+#       define DPPU_SHIFT                               0
+#       define DPPD(x)                                  ((x) << 16)
+#       define DPPD_MASK                                (0xffff << 16)
+#       define DPPD_SHIFT                               16
+#define RCU_PWR_GATING_CNTL_4                           0x4a8
+#       define RT(x)                                    ((x) << 0)
+#       define RT_MASK                                  (0xffff << 0)
+#       define RT_SHIFT                                 0
+#       define IT(x)                                    ((x) << 16)
+#       define IT_MASK                                  (0xffff << 16)
+#       define IT_SHIFT                                 16
+
+/* yes these two have the same address */
+#define RCU_PWR_GATING_CNTL_5                           0x504
+#define RCU_GPU_BOOST_DISABLE                           0x508
+
+#define MCU_M3ARB_INDEX                                 0x504
+#define MCU_M3ARB_PARAMS                                0x508
+
+#define RCU_GNB_PWR_REP_TIMER_CNTL                      0x50C
+
+#define RCU_SclkDpmTdpLimit01                           0x514
+#define RCU_SclkDpmTdpLimit23                           0x518
+#define RCU_SclkDpmTdpLimit47                           0x51C
+#define RCU_SclkDpmTdpLimitPG                           0x520
+
+#define GNB_TDP_LIMIT                                   0x540
+#define RCU_BOOST_MARGIN                                0x544
+#define RCU_THROTTLE_MARGIN                             0x548
+
+#define SMU_PCIE_PG_ARGS                                0x58C
+#define SMU_PCIE_PG_ARGS_2                              0x598
+#define SMU_PCIE_PG_ARGS_3                              0x59C
+
+/* mmio */
+#define RCU_STATUS                                      0x11c
+#       define GMC_PWR_GATER_BUSY                       (1 << 8)
+#       define GFX_PWR_GATER_BUSY                       (1 << 9)
+#       define UVD_PWR_GATER_BUSY                       (1 << 10)
+#       define PCIE_PWR_GATER_BUSY                      (1 << 11)
+#       define GMC_PWR_GATER_STATE                      (1 << 12)
+#       define GFX_PWR_GATER_STATE                      (1 << 13)
+#       define UVD_PWR_GATER_STATE                      (1 << 14)
+#       define PCIE_PWR_GATER_STATE                     (1 << 15)
+#       define GFX1_PWR_GATER_BUSY                      (1 << 16)
+#       define GFX2_PWR_GATER_BUSY                      (1 << 17)
+#       define GFX1_PWR_GATER_STATE                     (1 << 18)
+#       define GFX2_PWR_GATER_STATE                     (1 << 19)
+
+#define GFX_INT_REQ                                     0x120
+#       define INT_REQ                                  (1 << 0)
+#       define SERV_INDEX(x)                            ((x) << 1)
+#       define SERV_INDEX_MASK                          (0xff << 1)
+#       define SERV_INDEX_SHIFT                         1
+#define GFX_INT_STATUS                                  0x124
+#       define INT_ACK                                  (1 << 0)
+#       define INT_DONE                                 (1 << 1)
+
+#define CG_SCLK_CNTL                                    0x600
+#       define SCLK_DIVIDER(x)                          ((x) << 0)
+#       define SCLK_DIVIDER_MASK                        (0x7f << 0)
+#       define SCLK_DIVIDER_SHIFT                       0
+#define CG_SCLK_STATUS                                  0x604
+#       define SCLK_OVERCLK_DETECT                      (1 << 2)
+
+#define CG_DCLK_CNTL                                    0x610
+#       define DCLK_DIVIDER_MASK                        0x7f
+#       define DCLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_DCLK_STATUS                                  0x614
+#       define DCLK_STATUS                              (1 << 0)
+#define CG_VCLK_CNTL                                    0x618
+#       define VCLK_DIVIDER_MASK                        0x7f
+#       define VCLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_VCLK_STATUS                                  0x61c
+
+#define GENERAL_PWRMGT                                  0x63c
+#       define STATIC_PM_EN                             (1 << 1)
+
+#define SCLK_PWRMGT_CNTL                                0x644
+#       define SCLK_PWRMGT_OFF                          (1 << 0)
+#       define SCLK_LOW_D1                              (1 << 1)
+#       define FIR_RESET                                (1 << 4)
+#       define FIR_FORCE_TREND_SEL                      (1 << 5)
+#       define FIR_TREND_MODE                           (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                       (1 << 7)
+#       define GFX_CLK_FORCE_ON                         (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                      (1 << 9)
+#       define GFX_CLK_FORCE_OFF                        (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                      (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                      (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                      (1 << 13)
+#       define GFX_VOLTAGE_CHANGE_EN                    (1 << 16)
+#       define GFX_VOLTAGE_CHANGE_MODE                  (1 << 17)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                0x66c
+#       define TARG_SCLK_INDEX(x)                       ((x) << 6)
+#       define TARG_SCLK_INDEX_MASK                     (0x7 << 6)
+#       define TARG_SCLK_INDEX_SHIFT                    6
+#       define CURR_SCLK_INDEX(x)                       ((x) << 9)
+#       define CURR_SCLK_INDEX_MASK                     (0x7 << 9)
+#       define CURR_SCLK_INDEX_SHIFT                    9
+#       define TARG_INDEX(x)                            ((x) << 12)
+#       define TARG_INDEX_MASK                          (0x7 << 12)
+#       define TARG_INDEX_SHIFT                         12
+#       define CURR_INDEX(x)                            ((x) << 15)
+#       define CURR_INDEX_MASK                          (0x7 << 15)
+#       define CURR_INDEX_SHIFT                         15
+
+#define CG_SCLK_DPM_CTRL                                0x684
+#       define SCLK_FSTATE_0_DIV(x)                     ((x) << 0)
+#       define SCLK_FSTATE_0_DIV_MASK                   (0x7f << 0)
+#       define SCLK_FSTATE_0_DIV_SHIFT                  0
+#       define SCLK_FSTATE_0_VLD                        (1 << 7)
+#       define SCLK_FSTATE_1_DIV(x)                     ((x) << 8)
+#       define SCLK_FSTATE_1_DIV_MASK                   (0x7f << 8)
+#       define SCLK_FSTATE_1_DIV_SHIFT                  8
+#       define SCLK_FSTATE_1_VLD                        (1 << 15)
+#       define SCLK_FSTATE_2_DIV(x)                     ((x) << 16)
+#       define SCLK_FSTATE_2_DIV_MASK                   (0x7f << 16)
+#       define SCLK_FSTATE_2_DIV_SHIFT                  16
+#       define SCLK_FSTATE_2_VLD                        (1 << 23)
+#       define SCLK_FSTATE_3_DIV(x)                     ((x) << 24)
+#       define SCLK_FSTATE_3_DIV_MASK                   (0x7f << 24)
+#       define SCLK_FSTATE_3_DIV_SHIFT                  24
+#       define SCLK_FSTATE_3_VLD                        (1 << 31)
+#define CG_SCLK_DPM_CTRL_2                              0x688
+#define CG_GCOOR                                        0x68c
+#       define PHC(x)                                   ((x) << 0)
+#       define PHC_MASK                                 (0x1f << 0)
+#       define PHC_SHIFT                                0
+#       define SDC(x)                                   ((x) << 9)
+#       define SDC_MASK                                 (0x3ff << 9)
+#       define SDC_SHIFT                                9
+#       define SU(x)                                    ((x) << 23)
+#       define SU_MASK                                  (0xf << 23)
+#       define SU_SHIFT                                 23
+#       define DIV_ID(x)                                ((x) << 28)
+#       define DIV_ID_MASK                              (0x7 << 28)
+#       define DIV_ID_SHIFT                             28
+
+#define CG_FTV                                          0x690
+#define CG_FFCT_0                                       0x694
+#       define UTC_0(x)                                 ((x) << 0)
+#       define UTC_0_MASK                               (0x3ff << 0)
+#       define UTC_0_SHIFT                              0
+#       define DTC_0(x)                                 ((x) << 10)
+#       define DTC_0_MASK                               (0x3ff << 10)
+#       define DTC_0_SHIFT                              10
+
+#define CG_GIT                                          0x6d8
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GICST_SHIFT                           0
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+#       define CG_GIPOT_SHIFT                           16
+
+#define CG_SCLK_DPM_CTRL_3                              0x6e0
+#       define FORCE_SCLK_STATE(x)                      ((x) << 0)
+#       define FORCE_SCLK_STATE_MASK                    (0x7 << 0)
+#       define FORCE_SCLK_STATE_SHIFT                   0
+#       define FORCE_SCLK_STATE_EN                      (1 << 3)
+#       define GNB_TT(x)                                ((x) << 8)
+#       define GNB_TT_MASK                              (0xff << 8)
+#       define GNB_TT_SHIFT                             8
+#       define GNB_THERMTHRO_MASK                       (1 << 16)
+#       define CNB_THERMTHRO_MASK_SCLK                  (1 << 17)
+#       define DPM_SCLK_ENABLE                          (1 << 18)
+#       define GNB_SLOW_FSTATE_0_MASK                   (1 << 23)
+#       define GNB_SLOW_FSTATE_0_SHIFT                  23
+#       define FORCE_NB_PSTATE_1                        (1 << 31)
+
+#define CG_SSP                                          0x6e8
+#       define SST(x)                                   ((x) << 0)
+#       define SST_MASK                                 (0xffff << 0)
+#       define SST_SHIFT                                0
+#       define SSTU(x)                                  ((x) << 16)
+#       define SSTU_MASK                                (0xffff << 16)
+#       define SSTU_SHIFT                               16
+
+#define CG_ACPI_CNTL                                    0x70c
+#       define SCLK_ACPI_DIV(x)                         ((x) << 0)
+#       define SCLK_ACPI_DIV_MASK                       (0x7f << 0)
+#       define SCLK_ACPI_DIV_SHIFT                      0
+
+#define CG_SCLK_DPM_CTRL_4                              0x71c
+#       define DC_HDC(x)                                ((x) << 14)
+#       define DC_HDC_MASK                              (0x3fff << 14)
+#       define DC_HDC_SHIFT                             14
+#       define DC_HU(x)                                 ((x) << 28)
+#       define DC_HU_MASK                               (0xf << 28)
+#       define DC_HU_SHIFT                              28
+#define CG_SCLK_DPM_CTRL_5                              0x720
+#       define SCLK_FSTATE_BOOTUP(x)                    ((x) << 0)
+#       define SCLK_FSTATE_BOOTUP_MASK                  (0x7 << 0)
+#       define SCLK_FSTATE_BOOTUP_SHIFT                 0
+#       define TT_TP(x)                                 ((x) << 3)
+#       define TT_TP_MASK                               (0xffff << 3)
+#       define TT_TP_SHIFT                              3
+#       define TT_TU(x)                                 ((x) << 19)
+#       define TT_TU_MASK                               (0xff << 19)
+#       define TT_TU_SHIFT                              19
+#define CG_SCLK_DPM_CTRL_6                              0x724
+#define CG_AT_0                                         0x728
+#       define CG_R(x)                                  ((x) << 0)
+#       define CG_R_MASK                                (0xffff << 0)
+#       define CG_R_SHIFT                               0
+#       define CG_L(x)                                  ((x) << 16)
+#       define CG_L_MASK                                (0xffff << 16)
+#       define CG_L_SHIFT                               16
+#define CG_AT_1                                         0x72c
+#define CG_AT_2                                         0x730
+#define CG_THERMAL_INT                                  0x734
+#       define DIG_THERM_INTH(x)                        ((x) << 8)
+#       define DIG_THERM_INTH_MASK                      0x0000FF00
+#       define DIG_THERM_INTH_SHIFT                     8
+#       define DIG_THERM_INTL(x)                        ((x) << 16)
+#       define DIG_THERM_INTL_MASK                      0x00FF0000
+#       define DIG_THERM_INTL_SHIFT                     16
+#       define THERM_INT_MASK_HIGH                      (1 << 24)
+#       define THERM_INT_MASK_LOW                       (1 << 25)
+#define CG_AT_3                                         0x738
+#define CG_AT_4                                         0x73c
+#define CG_AT_5                                         0x740
+#define CG_AT_6                                         0x744
+#define CG_AT_7                                         0x748
+
+#define CG_BSP_0                                        0x750
+#       define BSP(x)                                   ((x) << 0)
+#       define BSP_MASK                                 (0xffff << 0)
+#       define BSP_SHIFT                                0
+#       define BSU(x)                                   ((x) << 16)
+#       define BSU_MASK                                 (0xf << 16)
+#       define BSU_SHIFT                                16
+
+#define CG_CG_VOLTAGE_CNTL                              0x770
+#       define REQ                                      (1 << 0)
+#       define LEVEL(x)                                 ((x) << 1)
+#       define LEVEL_MASK                               (0x3 << 1)
+#       define LEVEL_SHIFT                              1
+#       define CG_VOLTAGE_EN                            (1 << 3)
+#       define FORCE                                    (1 << 4)
+#       define PERIOD(x)                                ((x) << 8)
+#       define PERIOD_MASK                              (0xffff << 8)
+#       define PERIOD_SHIFT                             8
+#       define UNIT(x)                                  ((x) << 24)
+#       define UNIT_MASK                                (0xf << 24)
+#       define UNIT_SHIFT                               24
+
+#define CG_ACPI_VOLTAGE_CNTL                            0x780
+#       define ACPI_VOLTAGE_EN                          (1 << 8)
+
+#define CG_DPM_VOLTAGE_CNTL                             0x788
+#       define DPM_STATE0_LEVEL_MASK                    (0x3 << 0)
+#       define DPM_STATE0_LEVEL_SHIFT                   0
+#       define DPM_VOLTAGE_EN                           (1 << 16)
+
+#define CG_PWR_GATING_CNTL                              0x7ac
+#       define DYN_PWR_DOWN_EN                          (1 << 0)
+#       define ACPI_PWR_DOWN_EN                         (1 << 1)
+#       define GFX_CLK_OFF_PWR_DOWN_EN                  (1 << 2)
+#       define IOC_DISGPU_PWR_DOWN_EN                   (1 << 3)
+#       define FORCE_POWR_ON                            (1 << 4)
+#       define PGP(x)                                   ((x) << 8)
+#       define PGP_MASK                                 (0xffff << 8)
+#       define PGP_SHIFT                                8
+#       define PGU(x)                                   ((x) << 24)
+#       define PGU_MASK                                 (0xf << 24)
+#       define PGU_SHIFT                                24
+
+#define CG_CGTT_LOCAL_0                                 0x7d0
+#define CG_CGTT_LOCAL_1                                 0x7d4
+
+#define DEEP_SLEEP_CNTL                                 0x818
+#       define R_DIS                                    (1 << 3)
+#       define HS(x)                                    ((x) << 4)
+#       define HS_MASK                                  (0xfff << 4)
+#       define HS_SHIFT                                 4
+#       define ENABLE_DS                                (1 << 31)
+#define DEEP_SLEEP_CNTL2                                0x81c
+#       define LB_UFP_EN                                (1 << 0)
+#       define INOUT_C(x)                               ((x) << 4)
+#       define INOUT_C_MASK                             (0xff << 4)
+#       define INOUT_C_SHIFT                            4
+
+#define CG_SCRATCH2                                     0x824
+
+#define CG_SCLK_DPM_CTRL_11                             0x830
+
+#define HW_REV                                          0x5564
+#       define ATI_REV_ID_MASK                          (0xf << 28)
+#       define ATI_REV_ID_SHIFT                         28
+/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
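+/* read in trinity_dpm.c as:
+ *   (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT */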
+
+#define DOUT_SCRATCH3                                   0x611c
+
+#define GB_ADDR_CONFIG                                  0x98f8
+
+#endif
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
new file mode 100644
index 0000000..5d317f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -0,0 +1,2105 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "trinityd.h"
+#include "r600_dpm.h"
+#include "trinity_dpm.h"
+#include <linux/seq_file.h>
+
+#define TRINITY_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define TRINITY_MINIMUM_ENGINE_CLOCK 800
+#define SCLK_MIN_DIV_INTV_SHIFT     12
+#define TRINITY_DISPCLK_BYPASS_THRESHOLD 10000
+
+#ifndef TRINITY_MGCG_SEQUENCE
+#define TRINITY_MGCG_SEQUENCE  100
+
+static const u32 trinity_mgcg_shls_default[] =
+{
+	/* Register, Value, Mask */
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00003fc4, 0xc0000000, 0xffffffff,
+	0x00005448, 0x00000100, 0xffffffff,
+	0x000055e4, 0x00000100, 0xffffffff,
+	0x0000160c, 0x00000100, 0xffffffff,
+	0x00008984, 0x06000100, 0xffffffff,
+	0x0000c164, 0x00000100, 0xffffffff,
+	0x00008a18, 0x00000100, 0xffffffff,
+	0x0000897c, 0x06000100, 0xffffffff,
+	0x00008b28, 0x00000100, 0xffffffff,
+	0x00009144, 0x00800200, 0xffffffff,
+	0x00009a60, 0x00000100, 0xffffffff,
+	0x00009868, 0x00000100, 0xffffffff,
+	0x00008d58, 0x00000100, 0xffffffff,
+	0x00009510, 0x00000100, 0xffffffff,
+	0x0000949c, 0x00000100, 0xffffffff,
+	0x00009654, 0x00000100, 0xffffffff,
+	0x00009030, 0x00000100, 0xffffffff,
+	0x00009034, 0x00000100, 0xffffffff,
+	0x00009038, 0x00000100, 0xffffffff,
+	0x0000903c, 0x00000100, 0xffffffff,
+	0x00009040, 0x00000100, 0xffffffff,
+	0x0000a200, 0x00000100, 0xffffffff,
+	0x0000a204, 0x00000100, 0xffffffff,
+	0x0000a208, 0x00000100, 0xffffffff,
+	0x0000a20c, 0x00000100, 0xffffffff,
+	0x00009744, 0x00000100, 0xffffffff,
+	0x00003f80, 0x00000100, 0xffffffff,
+	0x0000a210, 0x00000100, 0xffffffff,
+	0x0000a214, 0x00000100, 0xffffffff,
+	0x000004d8, 0x00000100, 0xffffffff,
+	0x00009664, 0x00000100, 0xffffffff,
+	0x00009698, 0x00000100, 0xffffffff,
+	0x000004d4, 0x00000200, 0xffffffff,
+	0x000004d0, 0x00000000, 0xffffffff,
+	0x000030cc, 0x00000104, 0xffffffff,
+	0x0000d0c0, 0x00000100, 0xffffffff,
+	0x0000d8c0, 0x00000100, 0xffffffff,
+	0x0000951c, 0x00010000, 0xffffffff,
+	0x00009160, 0x00030002, 0xffffffff,
+	0x00009164, 0x00050004, 0xffffffff,
+	0x00009168, 0x00070006, 0xffffffff,
+	0x00009178, 0x00070000, 0xffffffff,
+	0x0000917c, 0x00030002, 0xffffffff,
+	0x00009180, 0x00050004, 0xffffffff,
+	0x0000918c, 0x00010006, 0xffffffff,
+	0x00009190, 0x00090008, 0xffffffff,
+	0x00009194, 0x00070000, 0xffffffff,
+	0x00009198, 0x00030002, 0xffffffff,
+	0x0000919c, 0x00050004, 0xffffffff,
+	0x000091a8, 0x00010006, 0xffffffff,
+	0x000091ac, 0x00090008, 0xffffffff,
+	0x000091b0, 0x00070000, 0xffffffff,
+	0x000091b4, 0x00030002, 0xffffffff,
+	0x000091b8, 0x00050004, 0xffffffff,
+	0x000091c4, 0x00010006, 0xffffffff,
+	0x000091c8, 0x00090008, 0xffffffff,
+	0x000091cc, 0x00070000, 0xffffffff,
+	0x000091d0, 0x00030002, 0xffffffff,
+	0x000091d4, 0x00050004, 0xffffffff,
+	0x000091e0, 0x00010006, 0xffffffff,
+	0x000091e4, 0x00090008, 0xffffffff,
+	0x000091e8, 0x00000000, 0xffffffff,
+	0x000091ec, 0x00070000, 0xffffffff,
+	0x000091f0, 0x00030002, 0xffffffff,
+	0x000091f4, 0x00050004, 0xffffffff,
+	0x00009200, 0x00010006, 0xffffffff,
+	0x00009204, 0x00090008, 0xffffffff,
+	0x00009208, 0x00070000, 0xffffffff,
+	0x0000920c, 0x00030002, 0xffffffff,
+	0x00009210, 0x00050004, 0xffffffff,
+	0x0000921c, 0x00010006, 0xffffffff,
+	0x00009220, 0x00090008, 0xffffffff,
+	0x00009294, 0x00000000, 0xffffffff
+};
+
+static const u32 trinity_mgcg_shls_enable[] =
+{
+	/* Register, Value, Mask */
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0x00000000, 0x000133FF,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0x00000000, 0xE00B03FC,
+	0x00009150, 0x96944200, 0xffffffff
+};
+
+static const u32 trinity_mgcg_shls_disable[] =
+{
+	/* Register, Value, Mask */
+	0x0000802c, 0xc0000000, 0xffffffff,
+	0x00009150, 0x00600000, 0xffffffff,
+	0x000008f8, 0x00000000, 0xffffffff,
+	0x000008fc, 0xffffffff, 0x000133FF,
+	0x000008f8, 0x00000001, 0xffffffff,
+	0x000008fc, 0xffffffff, 0xE00B03FC
+};
+#endif
+
+#ifndef TRINITY_SYSLS_SEQUENCE
+#define TRINITY_SYSLS_SEQUENCE  100
+
+static const u32 trinity_sysls_default[] =
+{
+	/* Register, Value, Mask */
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x0000d8bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x00002f50, 0x00000404, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x0000641c, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+
+static const u32 trinity_sysls_disable[] =
+{
+	/* Register, Value, Mask */
+	0x0000d0c0, 0x00000000, 0xffffffff,
+	0x0000d8c0, 0x00000000, 0xffffffff,
+	0x000055e8, 0x00000000, 0xffffffff,
+	0x0000d0bc, 0x00000000, 0xffffffff,
+	0x0000d8bc, 0x00000000, 0xffffffff,
+	0x000015c0, 0x00041401, 0xffffffff,
+	0x0000264c, 0x00040400, 0xffffffff,
+	0x00002648, 0x00040400, 0xffffffff,
+	0x00002650, 0x00040400, 0xffffffff,
+	0x000020b8, 0x00040400, 0xffffffff,
+	0x000020bc, 0x00040400, 0xffffffff,
+	0x000020c0, 0x00040c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680000, 0xffffffff,
+	0x00002f50, 0x00000404, 0xffffffff,
+	0x000004c8, 0x00000001, 0xffffffff,
+	0x0000641c, 0x00007ffd, 0xffffffff,
+	0x00000c7c, 0x0000ff00, 0xffffffff,
+	0x00006dfc, 0x0000007f, 0xffffffff
+};
+
+static const u32 trinity_sysls_enable[] =
+{
+	/* Register, Value, Mask */
+	0x000055e8, 0x00000001, 0xffffffff,
+	0x0000d0bc, 0x00000100, 0xffffffff,
+	0x0000d8bc, 0x00000100, 0xffffffff,
+	0x000015c0, 0x000c1401, 0xffffffff,
+	0x0000264c, 0x000c0400, 0xffffffff,
+	0x00002648, 0x000c0400, 0xffffffff,
+	0x00002650, 0x000c0400, 0xffffffff,
+	0x000020b8, 0x000c0400, 0xffffffff,
+	0x000020bc, 0x000c0400, 0xffffffff,
+	0x000020c0, 0x000c0c80, 0xffffffff,
+	0x0000f4a0, 0x000000c0, 0xffffffff,
+	0x0000f4a4, 0x00680fff, 0xffffffff,
+	0x00002f50, 0x00000903, 0xffffffff,
+	0x000004c8, 0x00000000, 0xffffffff,
+	0x0000641c, 0x00000000, 0xffffffff,
+	0x00000c7c, 0x00000000, 0xffffffff,
+	0x00006dfc, 0x00000000, 0xffffffff
+};
+#endif
+
+static const u32 trinity_override_mgpg_sequences[] =
+{
+	/* Register, Value */
+	0x00000200, 0xE030032C,
+	0x00000204, 0x00000FFF,
+	0x00000200, 0xE0300058,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE0300054,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE0300074,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE0300070,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE0300090,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE030008C,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE03000AC,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE03000A8,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE03000C8,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE03000C4,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE03000E4,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE03000E0,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE0300100,
+	0x00000204, 0x00030301,
+	0x00000200, 0xE03000FC,
+	0x00000204, 0x500010FF,
+	0x00000200, 0xE0300058,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE0300054,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE0300074,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE0300070,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE0300090,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE030008C,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE03000AC,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000A8,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE03000C8,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000C4,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE03000E4,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000E0,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE0300100,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000FC,
+	0x00000204, 0x600010FF,
+	0x00000200, 0xE0300058,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE0300054,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE0300074,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE0300070,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE0300090,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE030008C,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE03000AC,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000A8,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE03000C8,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000C4,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE03000E4,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000E0,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE0300100,
+	0x00000204, 0x00030303,
+	0x00000200, 0xE03000FC,
+	0x00000204, 0x700010FF,
+	0x00000200, 0xE0300058,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE0300054,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE0300074,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE0300070,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE0300090,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE030008C,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE03000AC,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE03000A8,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE03000C4,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE03000C8,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE03000E4,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE03000E0,
+	0x00000204, 0x800010FF,
+	0x00000200, 0xE0300100,
+	0x00000204, 0x00010303,
+	0x00000200, 0xE03000FC,
+	0x00000204, 0x800010FF,
+	0x00000200, 0x0001f198,
+	0x00000204, 0x0003ffff,
+	0x00000200, 0x0001f19C,
+	0x00000204, 0x3fffffff,
+	0x00000200, 0xE030032C,
+	0x00000204, 0x00000000,
+};
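+
+/* The pairs above are written verbatim by
+ * trinity_program_override_mgpg_sequences(); the alternating 0x200/0x204
+ * addresses look like an index/data register pair, each 0x204 write
+ * supplying the payload for the index written just before it.
+ */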
+
+extern void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable);
+static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
+						   const u32 *seq, u32 count);
+static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev);
+static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
+					     struct radeon_ps *new_rps,
+					     struct radeon_ps *old_rps);
+
+static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
+{
+	struct trinity_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = rdev->pm.dpm.priv;
+
+	return pi;
+}
+
+static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 p, u;
+	u32 value;
+	struct atom_clock_dividers dividers;
+	u32 xclk = radeon_get_xclk(rdev);
+	u32 sssd = 1;
+	int ret;
+	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     25000, false, &dividers);
+	if (ret)
+		return;
+
+	value = RREG32_SMC(GFX_POWER_GATING_CNTL);
+	value &= ~(SSSD_MASK | PDS_DIV_MASK);
+	if (sssd)
+		value |= SSSD(1);
+	value |= PDS_DIV(dividers.post_div);
+	WREG32_SMC(GFX_POWER_GATING_CNTL, value);
+
+	r600_calculate_u_and_p(500, xclk, 16, &p, &u);
+
+	WREG32(CG_PG_CTRL, SP(p) | SU(u));
+
+	WREG32_P(CG_GIPOTS, CG_GIPOT(p), ~CG_GIPOT_MASK);
+
+	/* XXX double check hw_rev */
+	if (pi->override_dynamic_mgpg && (hw_rev == 0))
+		trinity_override_dynamic_mg_powergating(rdev);
+}
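+
+/* r600_calculate_u_and_p() above turns an interval (apparently in
+ * microseconds, given xclk is in 10 kHz units) into a (p, u) pair:
+ * p is the tick count prescaled by 4^u so it fits a 16-bit field.
+ * SP()/SU() and CG_GIPOT() then consume that pair.
+ */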
+
+#define CGCG_CGTT_LOCAL0_MASK       0xFFFF33FF
+#define CGCG_CGTT_LOCAL1_MASK       0xFFFB0FFE
+#define CGTS_SM_CTRL_REG_DISABLE    0x00600000
+#define CGTS_SM_CTRL_REG_ENABLE     0x96944200
+
+static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
+					  bool enable)
+{
+	u32 local0;
+	u32 local1;
+
+	if (enable) {
+		local0 = RREG32_CG(CG_CGTT_LOCAL_0);
+		local1 = RREG32_CG(CG_CGTT_LOCAL_1);
+
+		WREG32_CG(CG_CGTT_LOCAL_0,
+			  (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+		WREG32_CG(CG_CGTT_LOCAL_1,
+			  (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+
+		WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
+	} else {
+		WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_DISABLE);
+
+		local0 = RREG32_CG(CG_CGTT_LOCAL_0);
+		local1 = RREG32_CG(CG_CGTT_LOCAL_1);
+
+		WREG32_CG(CG_CGTT_LOCAL_0,
+			  CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+		WREG32_CG(CG_CGTT_LOCAL_1,
+			  CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+	}
+}
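+
+/* Both paths preserve the bits outside the LOCAL masks:
+ * new = (wanted & MASK) | (old & ~MASK).  Enable sets only the
+ * override bits 0x00380000/0x0E000000; disable writes the full masks,
+ * presumably forcing the gated clocks back on.
+ */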
+
+static void trinity_mg_clockgating_initialize(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *seq = NULL;
+
+	seq = &trinity_mgcg_shls_default[0];
+	count = sizeof(trinity_mgcg_shls_default) / (3 * sizeof(u32));
+
+	trinity_program_clk_gating_hw_sequence(rdev, seq, count);
+}
+
+static void trinity_gfx_clockgating_enable(struct radeon_device *rdev,
+					   bool enable)
+{
+	if (enable) {
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
+	} else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		RREG32(GB_ADDR_CONFIG);
+	}
+}
+
+static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
+						   const u32 *seq, u32 count)
+{
+	u32 i, length = count * 3;
+
+	for (i = 0; i < length; i += 3)
+		WREG32_P(seq[i], seq[i+1], ~seq[i+2]);
+}
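+
+/* Each table entry is a (register, value, mask) triplet.  With
+ * WREG32_P(reg, val, ~mask) the bits outside mask are kept and the
+ * bits inside are replaced by val, so a mask of 0xffffffff simply
+ * writes the value as-is.
+ */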
+
+static void trinity_program_override_mgpg_sequences(struct radeon_device *rdev,
+						    const u32 *seq, u32 count)
+{
+	u32 i, length = count * 2;
+
+	for (i = 0; i < length; i += 2)
+		WREG32(seq[i], seq[i+1]);
+}
+
+static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev)
+{
+	u32 count;
+	const u32 *seq = NULL;
+
+	seq = &trinity_override_mgpg_sequences[0];
+	count = sizeof(trinity_override_mgpg_sequences) / (2 * sizeof(u32));
+
+	trinity_program_override_mgpg_sequences(rdev, seq, count);
+}
+
+static void trinity_ls_clockgating_enable(struct radeon_device *rdev,
+					  bool enable)
+{
+	u32 count;
+	const u32 *seq = NULL;
+
+	if (enable) {
+		seq = &trinity_sysls_enable[0];
+		count = sizeof(trinity_sysls_enable) / (3 * sizeof(u32));
+	} else {
+		seq = &trinity_sysls_disable[0];
+		count = sizeof(trinity_sysls_disable) / (3 * sizeof(u32));
+	}
+
+	trinity_program_clk_gating_hw_sequence(rdev, seq, count);
+}
+
+static void trinity_gfx_powergating_enable(struct radeon_device *rdev,
+					   bool enable)
+{
+	if (enable) {
+		if (RREG32_SMC(CC_SMU_TST_EFUSE1_MISC) & RB_BACKEND_DISABLE_MASK)
+			WREG32_SMC(SMU_SCRATCH_A, (RREG32_SMC(SMU_SCRATCH_A) | 0x01));
+
+		WREG32_P(SCLK_PWRMGT_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
+	} else {
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_PWR_DOWN_EN);
+		RREG32(GB_ADDR_CONFIG);
+	}
+}
+
+static void trinity_gfx_dynamic_mgpg_enable(struct radeon_device *rdev,
+					    bool enable)
+{
+	u32 value;
+
+	if (enable) {
+		value = RREG32_SMC(PM_I_CNTL_1);
+		value &= ~DS_PG_CNTL_MASK;
+		value |= DS_PG_CNTL(1);
+		WREG32_SMC(PM_I_CNTL_1, value);
+
+		value = RREG32_SMC(SMU_S_PG_CNTL);
+		value &= ~DS_PG_EN_MASK;
+		value |= DS_PG_EN(1);
+		WREG32_SMC(SMU_S_PG_CNTL, value);
+	} else {
+		value = RREG32_SMC(SMU_S_PG_CNTL);
+		value &= ~DS_PG_EN_MASK;
+		WREG32_SMC(SMU_S_PG_CNTL, value);
+
+		value = RREG32_SMC(PM_I_CNTL_1);
+		value &= ~DS_PG_CNTL_MASK;
+		WREG32_SMC(PM_I_CNTL_1, value);
+	}
+
+	trinity_gfx_dynamic_mgpg_config(rdev);
+}
+
+static void trinity_enable_clock_power_gating(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_gfx_clock_gating)
+		sumo_gfx_clockgating_initialize(rdev);
+	if (pi->enable_mg_clock_gating)
+		trinity_mg_clockgating_initialize(rdev);
+	if (pi->enable_gfx_power_gating)
+		trinity_gfx_powergating_initialize(rdev);
+	if (pi->enable_mg_clock_gating) {
+		trinity_ls_clockgating_enable(rdev, true);
+		trinity_mg_clockgating_enable(rdev, true);
+	}
+	if (pi->enable_gfx_clock_gating)
+		trinity_gfx_clockgating_enable(rdev, true);
+	if (pi->enable_gfx_dynamic_mgpg)
+		trinity_gfx_dynamic_mgpg_enable(rdev, true);
+	if (pi->enable_gfx_power_gating)
+		trinity_gfx_powergating_enable(rdev, true);
+}
+
+static void trinity_disable_clock_power_gating(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_gfx_power_gating)
+		trinity_gfx_powergating_enable(rdev, false);
+	if (pi->enable_gfx_dynamic_mgpg)
+		trinity_gfx_dynamic_mgpg_enable(rdev, false);
+	if (pi->enable_gfx_clock_gating)
+		trinity_gfx_clockgating_enable(rdev, false);
+	if (pi->enable_mg_clock_gating) {
+		trinity_mg_clockgating_enable(rdev, false);
+		trinity_ls_clockgating_enable(rdev, false);
+	}
+}
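+
+/* Teardown runs in exactly the reverse order of
+ * trinity_enable_clock_power_gating() above (minus the one-time
+ * *_initialize() steps).
+ */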
+
+static void trinity_set_divider_value(struct radeon_device *rdev,
+				      u32 index, u32 sclk)
+{
+	struct atom_clock_dividers  dividers;
+	int ret;
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk, false, &dividers);
+	if (ret)
+		return;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
+	value &= ~CLK_DIVIDER_MASK;
+	value |= CLK_DIVIDER(dividers.post_div);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
+
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk/2, false, &dividers);
+	if (ret)
+		return;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix);
+	value &= ~PD_SCLK_DIVIDER_MASK;
+	value |= PD_SCLK_DIVIDER(dividers.post_div);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix, value);
+}
+
+static void trinity_set_ds_dividers(struct radeon_device *rdev,
+				    u32 index, u32 divider)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
+	value &= ~DS_DIV_MASK;
+	value |= DS_DIV(divider);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
+}
+
+static void trinity_set_ss_dividers(struct radeon_device *rdev,
+				    u32 index, u32 divider)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
+	value &= ~DS_SH_DIV_MASK;
+	value |= DS_SH_DIV(divider);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
+}
+
+static void trinity_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid);
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
+	value &= ~VID_MASK;
+	value |= VID(vid_7bit);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
+	value &= ~LVRT_MASK;
+	value |= LVRT(0);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
+}
+
+static void trinity_set_allos_gnb_slow(struct radeon_device *rdev,
+				       u32 index, u32 gnb_slow)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
+	value &= ~GNB_SLOW_MASK;
+	value |= GNB_SLOW(gnb_slow);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
+}
+
+static void trinity_set_force_nbp_state(struct radeon_device *rdev,
+					u32 index, u32 force_nbp_state)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
+	value &= ~FORCE_NBPS1_MASK;
+	value |= FORCE_NBPS1(force_nbp_state);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
+}
+
+static void trinity_set_display_wm(struct radeon_device *rdev,
+				   u32 index, u32 wm)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
+	value &= ~DISPLAY_WM_MASK;
+	value |= DISPLAY_WM(wm);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
+}
+
+static void trinity_set_vce_wm(struct radeon_device *rdev,
+			       u32 index, u32 wm)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
+	value &= ~VCE_WM_MASK;
+	value |= VCE_WM(wm);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
+}
+
+static void trinity_set_at(struct radeon_device *rdev,
+			   u32 index, u32 at)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix);
+	value &= ~AT_MASK;
+	value |= AT(at);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix, value);
+}
+
+static void trinity_program_power_level(struct radeon_device *rdev,
+					struct trinity_pl *pl, u32 index)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (index >= SUMO_MAX_HARDWARE_POWERLEVELS)
+		return;
+
+	trinity_set_divider_value(rdev, index, pl->sclk);
+	trinity_set_vid(rdev, index, pl->vddc_index);
+	trinity_set_ss_dividers(rdev, index, pl->ss_divider_index);
+	trinity_set_ds_dividers(rdev, index, pl->ds_divider_index);
+	trinity_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
+	trinity_set_force_nbp_state(rdev, index, pl->force_nbp_state);
+	trinity_set_display_wm(rdev, index, pl->display_wm);
+	trinity_set_vce_wm(rdev, index, pl->vce_wm);
+	trinity_set_at(rdev, index, pi->at[index]);
+}
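+
+/* All of the per-level helpers above index the same fixed-size block
+ * of SMC registers:
+ *
+ *   ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+ *   RREG32_SMC(SMU_SCLK_DPM_STATE_0_<reg> + ix);
+ *
+ * i.e. level N uses the level-0 layout offset by N state tables.
+ */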
+
+static void trinity_power_level_enable_disable(struct radeon_device *rdev,
+					       u32 index, bool enable)
+{
+	u32 value;
+	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
+
+	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
+	value &= ~STATE_VALID_MASK;
+	if (enable)
+		value |= STATE_VALID(1);
+	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
+}
+
+static bool trinity_dpm_enabled(struct radeon_device *rdev)
+{
+	if (RREG32_SMC(SMU_SCLK_DPM_CNTL) & SCLK_DPM_EN(1))
+		return true;
+	else
+		return false;
+}
+
+static void trinity_start_dpm(struct radeon_device *rdev)
+{
+	u32 value = RREG32_SMC(SMU_SCLK_DPM_CNTL);
+
+	value &= ~(SCLK_DPM_EN_MASK | SCLK_DPM_BOOT_STATE_MASK | VOLTAGE_CHG_EN_MASK);
+	value |= SCLK_DPM_EN(1) | SCLK_DPM_BOOT_STATE(0) | VOLTAGE_CHG_EN(1);
+	WREG32_SMC(SMU_SCLK_DPM_CNTL, value);
+
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+	WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~EN);
+
+	trinity_dpm_config(rdev, true);
+}
+
+static void trinity_wait_for_dpm_enabled(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SCLK_PWRMGT_CNTL) & DYNAMIC_PM_EN)
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_STATE_MASK) == 0)
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
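+
+/* Three bounded polls, each up to rdev->usec_timeout iterations with
+ * udelay(1) between reads: first for the dynamic-PM enable to latch,
+ * then for the target and the current profile index to settle at
+ * state 0.  A timeout falls through silently, as in
+ * trinity_wait_for_level_0().
+ */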
+
+static void trinity_stop_dpm(struct radeon_device *rdev)
+{
+	u32 sclk_dpm_cntl;
+
+	WREG32_P(CG_CG_VOLTAGE_CNTL, EN, ~EN);
+
+	sclk_dpm_cntl = RREG32_SMC(SMU_SCLK_DPM_CNTL);
+	sclk_dpm_cntl &= ~(SCLK_DPM_EN_MASK | VOLTAGE_CHG_EN_MASK);
+	WREG32_SMC(SMU_SCLK_DPM_CNTL, sclk_dpm_cntl);
+
+	trinity_dpm_config(rdev, false);
+}
+
+static void trinity_start_am(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
+}
+
+static void trinity_reset_am(struct radeon_device *rdev)
+{
+	WREG32_P(SCLK_PWRMGT_CNTL, RESET_SCLK_CNT | RESET_BUSY_CNT,
+		 ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
+}
+
+static void trinity_wait_for_level_0(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void trinity_enable_power_level_0(struct radeon_device *rdev)
+{
+	trinity_power_level_enable_disable(rdev, 0, true);
+}
+
+static void trinity_force_level_0(struct radeon_device *rdev)
+{
+	trinity_dpm_force_state(rdev, 0);
+}
+
+static void trinity_unforce_levels(struct radeon_device *rdev)
+{
+	trinity_dpm_no_forced_level(rdev);
+}
+
+static void trinity_program_power_levels_0_to_n(struct radeon_device *rdev,
+						struct radeon_ps *new_rps,
+						struct radeon_ps *old_rps)
+{
+	struct trinity_ps *new_ps = trinity_get_ps(new_rps);
+	struct trinity_ps *old_ps = trinity_get_ps(old_rps);
+	u32 i;
+	u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
+
+	for (i = 0; i < new_ps->num_levels; i++) {
+		trinity_program_power_level(rdev, &new_ps->levels[i], i);
+		trinity_power_level_enable_disable(rdev, i, true);
+	}
+
+	for (i = new_ps->num_levels; i < n_current_state_levels; i++)
+		trinity_power_level_enable_disable(rdev, i, false);
+}
+
+static void trinity_program_bootup_state(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 i;
+
+	trinity_program_power_level(rdev, &pi->boot_pl, 0);
+	trinity_power_level_enable_disable(rdev, 0, true);
+
+	for (i = 1; i < 8; i++)
+		trinity_power_level_enable_disable(rdev, i, false);
+}
+
+static void trinity_setup_uvd_clock_table(struct radeon_device *rdev,
+					  struct radeon_ps *rps)
+{
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	u32 uvdstates = (ps->vclk_low_divider |
+			 ps->vclk_high_divider << 8 |
+			 ps->dclk_low_divider << 16 |
+			 ps->dclk_high_divider << 24);
+
+	WREG32_SMC(SMU_UVD_DPM_STATES, uvdstates);
+}
+
+static void trinity_setup_uvd_dpm_interval(struct radeon_device *rdev,
+					   u32 interval)
+{
+	u32 p, u;
+	u32 tp = RREG32_SMC(PM_TP);
+	u32 val;
+	u32 xclk = radeon_get_xclk(rdev);
+
+	r600_calculate_u_and_p(interval, xclk, 16, &p, &u);
+
+	val = (p + tp - 1) / tp;
+
+	WREG32_SMC(SMU_UVD_DPM_CNTL, val);
+}
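+
+/* (p + tp - 1) / tp is an open-coded DIV_ROUND_UP(p, tp): the (p, u)
+ * interval is converted to whole units of tp (presumably the SMU tick
+ * period, given it is read back from PM_TP), rounded up so the
+ * programmed interval is never shorter than requested.
+ */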
+
+static bool trinity_uvd_clocks_zero(struct radeon_ps *rps)
+{
+	if ((rps->vclk == 0) && (rps->dclk == 0))
+		return true;
+	else
+		return false;
+}
+
+static bool trinity_uvd_clocks_equal(struct radeon_ps *rps1,
+				     struct radeon_ps *rps2)
+{
+	struct trinity_ps *ps1 = trinity_get_ps(rps1);
+	struct trinity_ps *ps2 = trinity_get_ps(rps2);
+
+	if ((rps1->vclk == rps2->vclk) &&
+	    (rps1->dclk == rps2->dclk) &&
+	    (ps1->vclk_low_divider == ps2->vclk_low_divider) &&
+	    (ps1->vclk_high_divider == ps2->vclk_high_divider) &&
+	    (ps1->dclk_low_divider == ps2->dclk_low_divider) &&
+	    (ps1->dclk_high_divider == ps2->dclk_high_divider))
+		return true;
+	else
+		return false;
+}
+
+static void trinity_setup_uvd_clocks(struct radeon_device *rdev,
+				     struct radeon_ps *new_rps,
+				     struct radeon_ps *old_rps)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_gfx_power_gating) {
+		trinity_gfx_powergating_enable(rdev, false);
+	}
+
+	if (pi->uvd_dpm) {
+		if (trinity_uvd_clocks_zero(new_rps) &&
+		    !trinity_uvd_clocks_zero(old_rps)) {
+			trinity_setup_uvd_dpm_interval(rdev, 0);
+		} else if (!trinity_uvd_clocks_zero(new_rps)) {
+			trinity_setup_uvd_clock_table(rdev, new_rps);
+
+			if (trinity_uvd_clocks_zero(old_rps)) {
+				u32 tmp = RREG32(CG_MISC_REG);
+				tmp &= 0xfffffffd;
+				WREG32(CG_MISC_REG, tmp);
+
+				radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
+
+				trinity_setup_uvd_dpm_interval(rdev, 3000);
+			}
+		}
+		trinity_uvd_dpm_config(rdev);
+	} else {
+		if (trinity_uvd_clocks_zero(new_rps) ||
+		    trinity_uvd_clocks_equal(new_rps, old_rps))
+			return;
+
+		radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
+	}
+
+	if (pi->enable_gfx_power_gating) {
+		trinity_gfx_powergating_enable(rdev, true);
+	}
+}
+
+static void trinity_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+						       struct radeon_ps *new_rps,
+						       struct radeon_ps *old_rps)
+{
+	struct trinity_ps *new_ps = trinity_get_ps(new_rps);
+	struct trinity_ps *current_ps = trinity_get_ps(old_rps);
+
+	if (new_ps->levels[new_ps->num_levels - 1].sclk >=
+	    current_ps->levels[current_ps->num_levels - 1].sclk)
+		return;
+
+	trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
+}
+
+static void trinity_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+						      struct radeon_ps *new_rps,
+						      struct radeon_ps *old_rps)
+{
+	struct trinity_ps *new_ps = trinity_get_ps(new_rps);
+	struct trinity_ps *current_ps = trinity_get_ps(old_rps);
+
+	if (new_ps->levels[new_ps->num_levels - 1].sclk <
+	    current_ps->levels[current_ps->num_levels - 1].sclk)
+		return;
+
+	trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
+}
+
+static void trinity_set_vce_clock(struct radeon_device *rdev,
+				  struct radeon_ps *new_rps,
+				  struct radeon_ps *old_rps)
+{
+	if ((old_rps->evclk != new_rps->evclk) ||
+	    (old_rps->ecclk != new_rps->ecclk)) {
+		/* turn the clocks on when encoding, off otherwise */
+		if (new_rps->evclk || new_rps->ecclk)
+			vce_v1_0_enable_mgcg(rdev, false);
+		else
+			vce_v1_0_enable_mgcg(rdev, true);
+		radeon_set_vce_clocks(rdev, new_rps->evclk, new_rps->ecclk);
+	}
+}
+
+static void trinity_program_ttt(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 value = RREG32_SMC(SMU_SCLK_DPM_TTT);
+
+	value &= ~(HT_MASK | LT_MASK);
+	value |= HT((pi->thermal_auto_throttling + 49) * 8);
+	value |= LT((pi->thermal_auto_throttling + 49 - pi->sys_info.htc_hyst_lmt) * 8);
+	WREG32_SMC(SMU_SCLK_DPM_TTT, value);
+}
+
+static void trinity_enable_att(struct radeon_device *rdev)
+{
+	u32 value = RREG32_SMC(SMU_SCLK_DPM_TT_CNTL);
+
+	value &= ~SCLK_TT_EN_MASK;
+	value |= SCLK_TT_EN(1);
+	WREG32_SMC(SMU_SCLK_DPM_TT_CNTL, value);
+}
+
+static void trinity_program_sclk_dpm(struct radeon_device *rdev)
+{
+	u32 p, u;
+	u32 tp = RREG32_SMC(PM_TP);
+	u32 ni;
+	u32 xclk = radeon_get_xclk(rdev);
+	u32 value;
+
+	r600_calculate_u_and_p(400, xclk, 16, &p, &u);
+
+	ni = (p + tp - 1) / tp;
+
+	value = RREG32_SMC(PM_I_CNTL_1);
+	value &= ~SCLK_DPM_MASK;
+	value |= SCLK_DPM(ni);
+	WREG32_SMC(PM_I_CNTL_1, value);
+}
+
+static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
+						 int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
+
+	rdev->pm.dpm.thermal.min_temp = low_temp;
+	rdev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
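+
+/* The thresholds are programmed with a +49 offset, which looks like
+ * the inverse of the -49 C adjustment the thermal sensor readout uses
+ * elsewhere in the driver; the same offset appears in
+ * trinity_program_ttt() above.
+ */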
+
+static void trinity_update_current_ps(struct radeon_device *rdev,
+				      struct radeon_ps *rps)
+{
+	struct trinity_ps *new_ps = trinity_get_ps(rps);
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	pi->current_rps = *rps;
+	pi->current_ps = *new_ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void trinity_update_requested_ps(struct radeon_device *rdev,
+					struct radeon_ps *rps)
+{
+	struct trinity_ps *new_ps = trinity_get_ps(rps);
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	pi->requested_rps = *rps;
+	pi->requested_ps = *new_ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_bapm) {
+		trinity_acquire_mutex(rdev);
+		trinity_dpm_bapm_enable(rdev, enable);
+		trinity_release_mutex(rdev);
+	}
+}
+
+int trinity_dpm_enable(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	trinity_acquire_mutex(rdev);
+
+	if (trinity_dpm_enabled(rdev)) {
+		trinity_release_mutex(rdev);
+		return -EINVAL;
+	}
+
+	trinity_program_bootup_state(rdev);
+	sumo_program_vc(rdev, 0x00C00033);
+	trinity_start_am(rdev);
+	if (pi->enable_auto_thermal_throttling) {
+		trinity_program_ttt(rdev);
+		trinity_enable_att(rdev);
+	}
+	trinity_program_sclk_dpm(rdev);
+	trinity_start_dpm(rdev);
+	trinity_wait_for_dpm_enabled(rdev);
+	trinity_dpm_bapm_enable(rdev, false);
+	trinity_release_mutex(rdev);
+
+	trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+int trinity_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	trinity_acquire_mutex(rdev);
+	trinity_enable_clock_power_gating(rdev);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret) {
+			trinity_release_mutex(rdev);
+			return ret;
+		}
+		rdev->irq.dpm_thermal = true;
+		radeon_irq_set(rdev);
+	}
+	trinity_release_mutex(rdev);
+
+	return 0;
+}
+
+void trinity_dpm_disable(struct radeon_device *rdev)
+{
+	trinity_acquire_mutex(rdev);
+	if (!trinity_dpm_enabled(rdev)) {
+		trinity_release_mutex(rdev);
+		return;
+	}
+	trinity_dpm_bapm_enable(rdev, false);
+	trinity_disable_clock_power_gating(rdev);
+	sumo_clear_vc(rdev);
+	trinity_wait_for_level_0(rdev);
+	trinity_stop_dpm(rdev);
+	trinity_reset_am(rdev);
+	trinity_release_mutex(rdev);
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		rdev->irq.dpm_thermal = false;
+		radeon_irq_set(rdev);
+	}
+
+	trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+static void trinity_get_min_sclk_divider(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	pi->min_sclk_did =
+		(RREG32_SMC(CC_SMU_MISC_FUSES) & MinSClkDid_MASK) >> MinSClkDid_SHIFT;
+}
+
+static void trinity_setup_nbp_sim(struct radeon_device *rdev,
+				  struct radeon_ps *rps)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct trinity_ps *new_ps = trinity_get_ps(rps);
+	u32 nbpsconfig;
+
+	if (pi->sys_info.nb_dpm_enable) {
+		nbpsconfig = RREG32_SMC(NB_PSTATE_CONFIG);
+		nbpsconfig &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK | DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
+		nbpsconfig |= (Dpm0PgNbPsLo(new_ps->Dpm0PgNbPsLo) |
+			       Dpm0PgNbPsHi(new_ps->Dpm0PgNbPsHi) |
+			       DpmXNbPsLo(new_ps->DpmXNbPsLo) |
+			       DpmXNbPsHi(new_ps->DpmXNbPsHi));
+		WREG32_SMC(NB_PSTATE_CONFIG, nbpsconfig);
+	}
+}
+
+int trinity_dpm_force_performance_level(struct radeon_device *rdev,
+					enum radeon_dpm_forced_level level)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	int i, ret;
+
+	if (ps->num_levels <= 1)
+		return 0;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		/* not supported by the hw */
+		return -EINVAL;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		ret = trinity_dpm_n_levels_disabled(rdev, ps->num_levels - 1);
+		if (ret)
+			return ret;
+	} else {
+		for (i = 0; i < ps->num_levels; i++) {
+			ret = trinity_dpm_n_levels_disabled(rdev, 0);
+			if (ret)
+				return ret;
+		}
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+int trinity_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+	struct radeon_ps *new_ps = &requested_ps;
+
+	trinity_update_requested_ps(rdev, new_ps);
+
+	trinity_apply_state_adjust_rules(rdev,
+					 &pi->requested_rps,
+					 &pi->current_rps);
+
+	return 0;
+}
+
+int trinity_dpm_set_power_state(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+	struct radeon_ps *old_ps = &pi->current_rps;
+
+	trinity_acquire_mutex(rdev);
+	if (pi->enable_dpm) {
+		if (pi->enable_bapm)
+			trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+		trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
+		trinity_enable_power_level_0(rdev);
+		trinity_force_level_0(rdev);
+		trinity_wait_for_level_0(rdev);
+		trinity_setup_nbp_sim(rdev, new_ps);
+		trinity_program_power_levels_0_to_n(rdev, new_ps, old_ps);
+		trinity_force_level_0(rdev);
+		trinity_unforce_levels(rdev);
+		trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+		trinity_set_vce_clock(rdev, new_ps, old_ps);
+	}
+	trinity_release_mutex(rdev);
+
+	return 0;
+}
+
+void trinity_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *new_ps = &pi->requested_rps;
+
+	trinity_update_current_ps(rdev, new_ps);
+}
+
+void trinity_dpm_setup_asic(struct radeon_device *rdev)
+{
+	trinity_acquire_mutex(rdev);
+	sumo_program_sstp(rdev);
+	sumo_take_smu_control(rdev, true);
+	trinity_get_min_sclk_divider(rdev);
+	trinity_release_mutex(rdev);
+}
+
+#if 0
+void trinity_dpm_reset_asic(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	trinity_acquire_mutex(rdev);
+	if (pi->enable_dpm) {
+		trinity_enable_power_level_0(rdev);
+		trinity_force_level_0(rdev);
+		trinity_wait_for_level_0(rdev);
+		trinity_program_bootup_state(rdev);
+		trinity_force_level_0(rdev);
+		trinity_unforce_levels(rdev);
+	}
+	trinity_release_mutex(rdev);
+}
+#endif
+
+static u16 trinity_convert_voltage_index_to_value(struct radeon_device *rdev,
+						  u32 vid_2bit)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
+	u32 svi_mode = (RREG32_SMC(PM_CONFIG) & SVI_Mode) ? 1 : 0;
+	u32 step = (svi_mode == 0) ? 1250 : 625;
+	u32 delta = vid_7bit * step + 50;
+
+	if (delta > 155000)
+		return 0;
+
+	return (155000 - delta) / 100;
+}
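+
+/* Works out to voltage(mV) = 1550 - vid_7bit * step, with a 12.5 mV
+ * step normally and 6.25 mV in SVI mode: the constants are in 10 uV
+ * units and the +50 rounds before the /100 truncation.  Anything past
+ * the 1.55 V base clamps to 0.
+ */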
+
+static void trinity_patch_boot_state(struct radeon_device *rdev,
+				     struct trinity_ps *ps)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	ps->num_levels = 1;
+	ps->nbps_flags = 0;
+	ps->bapm_flags = 0;
+	ps->levels[0] = pi->boot_pl;
+}
+
+static u8 trinity_calculate_vce_wm(struct radeon_device *rdev, u32 sclk)
+{
+	if (sclk < 20000)
+		return 1;
+	return 0;
+}
+
+static void trinity_construct_boot_state(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+	pi->boot_pl.ds_divider_index = 0;
+	pi->boot_pl.ss_divider_index = 0;
+	pi->boot_pl.allow_gnb_slow = 1;
+	pi->boot_pl.force_nbp_state = 0;
+	pi->boot_pl.display_wm = 0;
+	pi->boot_pl.vce_wm = 0;
+	pi->current_ps.num_levels = 1;
+	pi->current_ps.levels[0] = pi->boot_pl;
+}
+
+static u8 trinity_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+						  u32 sclk, u32 min_sclk_in_sr)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 i;
+	u32 temp;
+	u32 min = (min_sclk_in_sr > TRINITY_MINIMUM_ENGINE_CLOCK) ?
+		min_sclk_in_sr : TRINITY_MINIMUM_ENGINE_CLOCK;
+
+	if (sclk < min)
+		return 0;
+
+	if (!pi->enable_sclk_ds)
+		return 0;
+
+	for (i = TRINITY_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		temp = sclk / sumo_get_sleep_divider_from_id(i);
+		if (temp >= min || i == 0)
+			break;
+	}
+
+	return (u8)i;
+}
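+
+/* Walks from the deepest divider id downward and returns the largest
+ * one that keeps sclk / divider at or above the floor (the larger of
+ * min_sclk_in_sr and TRINITY_MINIMUM_ENGINE_CLOCK), i.e. the most
+ * aggressive deep-sleep divider that won't starve the engine.
+ */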
+
+static u32 trinity_get_valid_engine_clock(struct radeon_device *rdev,
+					  u32 lower_limit)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
+		if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
+			return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
+	}
+
+	if (i == pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries)
+		DRM_ERROR("engine clock out of range!\n");
+
+	return 0;
+}
+
+static void trinity_patch_thermal_state(struct radeon_device *rdev,
+					struct trinity_ps *ps,
+					struct trinity_ps *current_ps)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
+	u32 current_vddc;
+	u32 current_sclk;
+	u32 current_index = 0;
+
+	if (current_ps) {
+		current_vddc = current_ps->levels[current_index].vddc_index;
+		current_sclk = current_ps->levels[current_index].sclk;
+	} else {
+		current_vddc = pi->boot_pl.vddc_index;
+		current_sclk = pi->boot_pl.sclk;
+	}
+
+	ps->levels[0].vddc_index = current_vddc;
+
+	if (ps->levels[0].sclk > current_sclk)
+		ps->levels[0].sclk = current_sclk;
+
+	ps->levels[0].ds_divider_index =
+		trinity_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);
+	ps->levels[0].ss_divider_index = ps->levels[0].ds_divider_index;
+	ps->levels[0].allow_gnb_slow = 1;
+	ps->levels[0].force_nbp_state = 0;
+	ps->levels[0].display_wm = 0;
+	ps->levels[0].vce_wm =
+		trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
+}
+
+static u8 trinity_calculate_display_wm(struct radeon_device *rdev,
+				       struct trinity_ps *ps, u32 index)
+{
+	if (ps == NULL || ps->num_levels <= 1)
+		return 0;
+	else if (ps->num_levels == 2) {
+		if (index == 0)
+			return 0;
+		else
+			return 1;
+	} else {
+		if (index == 0)
+			return 0;
+		else if (ps->levels[index].sclk < 30000)
+			return 0;
+		else
+			return 1;
+	}
+}
+
+static u32 trinity_get_uvd_clock_index(struct radeon_device *rdev,
+				       struct radeon_ps *rps)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 i = 0;
+
+	for (i = 0; i < 4; i++) {
+		if ((rps->vclk == pi->sys_info.uvd_clock_table_entries[i].vclk) &&
+		    (rps->dclk == pi->sys_info.uvd_clock_table_entries[i].dclk))
+			break;
+	}
+
+	if (i >= 4) {
+		DRM_ERROR("UVD clock index not found!\n");
+		i = 3;
+	}
+	return i;
+}
+
+static void trinity_adjust_uvd_state(struct radeon_device *rdev,
+				     struct radeon_ps *rps)
+{
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 high_index = 0;
+	u32 low_index = 0;
+
+	if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
+		high_index = trinity_get_uvd_clock_index(rdev, rps);
+
+		switch (high_index) {
+		case 3:
+		case 2:
+			low_index = 1;
+			break;
+		case 1:
+		case 0:
+		default:
+			low_index = 0;
+			break;
+		}
+
+		ps->vclk_low_divider =
+			pi->sys_info.uvd_clock_table_entries[high_index].vclk_did;
+		ps->dclk_low_divider =
+			pi->sys_info.uvd_clock_table_entries[high_index].dclk_did;
+		ps->vclk_high_divider =
+			pi->sys_info.uvd_clock_table_entries[low_index].vclk_did;
+		ps->dclk_high_divider =
+			pi->sys_info.uvd_clock_table_entries[low_index].dclk_did;
+	}
+}
+
+static int trinity_get_vce_clock_voltage(struct radeon_device *rdev,
+					 u32 evclk, u32 ecclk, u16 *voltage)
+{
+	u32 i;
+	int ret = -EINVAL;
+	struct radeon_vce_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (((evclk == 0) && (ecclk == 0)) ||
+	    (table && (table->count == 0))) {
+		*voltage = 0;
+		return 0;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if ((evclk <= table->entries[i].evclk) &&
+		    (ecclk <= table->entries[i].ecclk)) {
+			*voltage = table->entries[i].v;
+			ret = 0;
+			break;
+		}
+	}
+
+	/* if no match return the highest voltage */
+	if (ret)
+		*voltage = table->entries[table->count - 1].v;
+
+	return ret;
+}
+
+static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
+					     struct radeon_ps *new_rps,
+					     struct radeon_ps *old_rps)
+{
+	struct trinity_ps *ps = trinity_get_ps(new_rps);
+	struct trinity_ps *current_ps = trinity_get_ps(old_rps);
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 min_voltage = 0; /* ??? */
+	u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
+	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
+	u32 i;
+	u16 min_vce_voltage;
+	bool force_high;
+	u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;
+
+	if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+		return trinity_patch_thermal_state(rdev, ps, current_ps);
+
+	trinity_adjust_uvd_state(rdev, new_rps);
+
+	if (new_rps->vce_active) {
+		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+	} else {
+		new_rps->evclk = 0;
+		new_rps->ecclk = 0;
+	}
+
+	for (i = 0; i < ps->num_levels; i++) {
+		if (ps->levels[i].vddc_index < min_voltage)
+			ps->levels[i].vddc_index = min_voltage;
+
+		if (ps->levels[i].sclk < min_sclk)
+			ps->levels[i].sclk =
+				trinity_get_valid_engine_clock(rdev, min_sclk);
+
+		/* patch in vce limits */
+		if (new_rps->vce_active) {
+			/* sclk */
+			if (ps->levels[i].sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+				ps->levels[i].sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+			/* vddc */
+			trinity_get_vce_clock_voltage(rdev, new_rps->evclk, new_rps->ecclk, &min_vce_voltage);
+			if (ps->levels[i].vddc_index < min_vce_voltage)
+				ps->levels[i].vddc_index = min_vce_voltage;
+		}
+
+		ps->levels[i].ds_divider_index =
+			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);
+
+		ps->levels[i].ss_divider_index = ps->levels[i].ds_divider_index;
+
+		ps->levels[i].allow_gnb_slow = 1;
+		ps->levels[i].force_nbp_state = 0;
+		ps->levels[i].display_wm =
+			trinity_calculate_display_wm(rdev, ps, i);
+		ps->levels[i].vce_wm =
+			trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
+	}
+
+	if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
+	    ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY))
+		ps->bapm_flags |= TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE;
+
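+	/*
+	 * Select the north-bridge p-state pairs for DPM level 0 and for
+	 * the remaining levels.  Battery/HD/SD states normally get the
+	 * 0x3 (presumably lowest) NB p-state, bumped to 0x2 for HD
+	 * states, three or more active displays, or single-channel UMA.
+	 */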
+	if (pi->sys_info.nb_dpm_enable) {
+		ps->Dpm0PgNbPsLo = 0x1;
+		ps->Dpm0PgNbPsHi = 0x0;
+		ps->DpmXNbPsLo = 0x2;
+		ps->DpmXNbPsHi = 0x1;
+
+		if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
+		    ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)) {
+			force_high = ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ||
+				      ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) &&
+				       (pi->sys_info.uma_channel_number == 1)));
+			force_high = (num_active_displays >= 3) || force_high;
+			ps->Dpm0PgNbPsLo = force_high ? 0x2 : 0x3;
+			ps->Dpm0PgNbPsHi = 0x1;
+			ps->DpmXNbPsLo = force_high ? 0x2 : 0x3;
+			ps->DpmXNbPsHi = 0x2;
+			ps->levels[ps->num_levels - 1].allow_gnb_slow = 0;
+		}
+	}
+}
+
+static void trinity_cleanup_asic(struct radeon_device *rdev)
+{
+	sumo_take_smu_control(rdev, false);
+}
+
+#if 0
+static void trinity_pre_display_configuration_change(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->voltage_drop_in_dce)
+		trinity_dce_enable_voltage_adjustment(rdev, false);
+}
+#endif
+
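+/*
+ * Program the display controller CAC (capacitance * activity) estimate
+ * used by the SMU.  A sketch of the formula, assuming 14213 is a fixed
+ * point CAC coefficient: 14213 * dispclk^2 * active displays, scaled
+ * down by the averaging window size taken from GPU_CAC_AVRG_CNTL.
+ */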
+static void trinity_add_dccac_value(struct radeon_device *rdev)
+{
+	u32 gpu_cac_avrg_cntl_window_size;
+	u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;
+	u64 disp_clk = rdev->clock.default_dispclk / 100;
+	u32 dc_cac_value;
+
+	gpu_cac_avrg_cntl_window_size =
+		(RREG32_SMC(GPU_CAC_AVRG_CNTL) & WINDOW_SIZE_MASK) >> WINDOW_SIZE_SHIFT;
+
+	dc_cac_value = (u32)((14213 * disp_clk * disp_clk * (u64)num_active_displays) >>
+			     (32 - gpu_cac_avrg_cntl_window_size));
+
+	WREG32_SMC(DC_CAC_VALUE, dc_cac_value);
+}
+
+void trinity_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->voltage_drop_in_dce)
+		trinity_dce_enable_voltage_adjustment(rdev, true);
+	trinity_add_dccac_value(rdev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void trinity_parse_pplib_non_clock_info(struct radeon_device *rdev,
+					       struct radeon_ps *rps,
+					       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					       u8 table_rev)
+{
+	struct trinity_ps *ps = trinity_get_ps(rps);
+
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.dpm.boot_ps = rps;
+		trinity_patch_boot_state(rdev, ps);
+	}
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void trinity_parse_pplib_clock_info(struct radeon_device *rdev,
+					   struct radeon_ps *rps, int index,
+					   union pplib_clock_info *clock_info)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	struct trinity_pl *pl = &ps->levels[index];
+	u32 sclk;
+
+	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+	pl->sclk = sclk;
+	pl->vddc_index = clock_info->sumo.vddcIndex;
+
+	ps->num_levels = index + 1;
+
+	if (pi->enable_sclk_ds) {
+		pl->ds_divider_index = 5;
+		pl->ss_divider_index = 5;
+	}
+}
+
+static int trinity_parse_power_table(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct sumo_ps *ps;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+				  sizeof(struct radeon_ps),
+				  GFP_KERNEL);
+	if (!rdev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
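+	/*
+	 * State entries are variable length: two header bytes
+	 * (ucNumDPMLevels and nonClockInfoIndex) followed by one
+	 * clock-info index byte per DPM level, hence the
+	 * "2 + ucNumDPMLevels" stride at the bottom of this loop.
+	 */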
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info)
+			return -EINVAL;
+		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(rdev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		rdev->pm.dpm.ps[i].ps_priv = ps;
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			trinity_parse_pplib_clock_info(rdev,
+						       &rdev->pm.dpm.ps[i], k,
+						       clock_info);
+			k++;
+		}
+		trinity_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+						   non_clock_info,
+						   non_clock_info_array->ucEntrySize);
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = 0;
+	}
+
+	return 0;
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+};
+
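+/*
+ * Convert a divider ID (DID) to a frequency.  The decoded divider
+ * appears to be in hundredths, with the DID ranges encoding steps of
+ * 0.25, 0.5 and 1.0: e.g. did 0x10 decodes to 400 (a divider of 4.00),
+ * so the result is dentist_vco_freq / 4, rounded up.
+ */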
+static u32 trinity_convert_did_to_freq(struct radeon_device *rdev, u8 did)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	u32 divider;
+
+	if (did >= 8 && did <= 0x3f)
+		divider = did * 25;
+	else if (did > 0x3f && did <= 0x5f)
+		divider = (did - 64) * 50 + 1600;
+	else if (did > 0x5f && did <= 0x7e)
+		divider = (did - 96) * 100 + 3200;
+	else if (did == 0x7f)
+		divider = 128 * 100;
+	else
+		return 10000;
+
+	return ((pi->sys_info.dentist_vco_freq * 100) + (divider - 1)) / divider;
+}
+
+static int trinity_parse_sys_info_table(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+					      data_offset);
+
+		if (crev != 7) {
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			return -EINVAL;
+		}
+		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_7.ulBootUpEngineClock);
+		pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_7.ulMinEngineClock);
+		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_7.ulBootUpUMAClock);
+		pi->sys_info.dentist_vco_freq = le32_to_cpu(igp_info->info_7.ulDentistVCOFreq);
+		pi->sys_info.bootup_nb_voltage_index =
+			le16_to_cpu(igp_info->info_7.usBootUpNBVoltage);
+		if (igp_info->info_7.ucHtcTmpLmt == 0)
+			pi->sys_info.htc_tmp_lmt = 203;
+		else
+			pi->sys_info.htc_tmp_lmt = igp_info->info_7.ucHtcTmpLmt;
+		if (igp_info->info_7.ucHtcHystLmt == 0)
+			pi->sys_info.htc_hyst_lmt = 5;
+		else
+			pi->sys_info.htc_hyst_lmt = igp_info->info_7.ucHtcHystLmt;
+		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+		}
+
+		if (pi->enable_nbps_policy)
+			pi->sys_info.nb_dpm_enable = igp_info->info_7.ucNBDPMEnable;
+		else
+			pi->sys_info.nb_dpm_enable = 0;
+
+		for (i = 0; i < TRINITY_NUM_NBPSTATES; i++) {
+			pi->sys_info.nbp_mclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateMemclkFreq[i]);
+			pi->sys_info.nbp_nclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateNClkFreq[i]);
+		}
+
+		pi->sys_info.nbp_voltage_index[0] = le16_to_cpu(igp_info->info_7.usNBP0Voltage);
+		pi->sys_info.nbp_voltage_index[1] = le16_to_cpu(igp_info->info_7.usNBP1Voltage);
+		pi->sys_info.nbp_voltage_index[2] = le16_to_cpu(igp_info->info_7.usNBP2Voltage);
+		pi->sys_info.nbp_voltage_index[3] = le16_to_cpu(igp_info->info_7.usNBP3Voltage);
+
+		if (!pi->sys_info.nb_dpm_enable) {
+			for (i = 1; i < TRINITY_NUM_NBPSTATES; i++) {
+				pi->sys_info.nbp_mclk[i] = pi->sys_info.nbp_mclk[0];
+				pi->sys_info.nbp_nclk[i] = pi->sys_info.nbp_nclk[0];
+				pi->sys_info.nbp_voltage_index[i] = pi->sys_info.nbp_voltage_index[0];
+			}
+		}
+
+		pi->sys_info.uma_channel_number = igp_info->info_7.ucUMAChannelNumber;
+
+		sumo_construct_sclk_voltage_mapping_table(rdev,
+							  &pi->sys_info.sclk_voltage_mapping_table,
+							  igp_info->info_7.sAvail_SCLK);
+		sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
+						 igp_info->info_7.sAvail_SCLK);
+
+		pi->sys_info.uvd_clock_table_entries[0].vclk_did =
+			igp_info->info_7.ucDPMState0VclkFid;
+		pi->sys_info.uvd_clock_table_entries[1].vclk_did =
+			igp_info->info_7.ucDPMState1VclkFid;
+		pi->sys_info.uvd_clock_table_entries[2].vclk_did =
+			igp_info->info_7.ucDPMState2VclkFid;
+		pi->sys_info.uvd_clock_table_entries[3].vclk_did =
+			igp_info->info_7.ucDPMState3VclkFid;
+
+		pi->sys_info.uvd_clock_table_entries[0].dclk_did =
+			igp_info->info_7.ucDPMState0DclkFid;
+		pi->sys_info.uvd_clock_table_entries[1].dclk_did =
+			igp_info->info_7.ucDPMState1DclkFid;
+		pi->sys_info.uvd_clock_table_entries[2].dclk_did =
+			igp_info->info_7.ucDPMState2DclkFid;
+		pi->sys_info.uvd_clock_table_entries[3].dclk_did =
+			igp_info->info_7.ucDPMState3DclkFid;
+
+		for (i = 0; i < 4; i++) {
+			pi->sys_info.uvd_clock_table_entries[i].vclk =
+				trinity_convert_did_to_freq(rdev,
+							    pi->sys_info.uvd_clock_table_entries[i].vclk_did);
+			pi->sys_info.uvd_clock_table_entries[i].dclk =
+				trinity_convert_did_to_freq(rdev,
+							    pi->sys_info.uvd_clock_table_entries[i].dclk_did);
+		}
+
+	}
+	return 0;
+}
+
+int trinity_dpm_init(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi;
+	int ret, i;
+
+	pi = kzalloc(sizeof(struct trinity_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	rdev->pm.dpm.priv = pi;
+
+	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+		pi->at[i] = TRINITY_AT_DFLT;
+
+	if (radeon_bapm == -1) {
+		/* There are stability issues reported on with
+		 * bapm enabled when switching between AC and battery
+		 * power.  At the same time, some MSI boards hang
+		 * if it's not enabled and dpm is enabled.  Just enable
+		 * it for MSI boards right now.
+		 */
+		if (rdev->pdev->subsystem_vendor == 0x1462)
+			pi->enable_bapm = true;
+		else
+			pi->enable_bapm = false;
+	} else if (radeon_bapm == 0) {
+		pi->enable_bapm = false;
+	} else {
+		pi->enable_bapm = true;
+	}
+	pi->enable_nbps_policy = true;
+	pi->enable_sclk_ds = true;
+	pi->enable_gfx_power_gating = true;
+	pi->enable_gfx_clock_gating = true;
+	pi->enable_mg_clock_gating = false;
+	pi->enable_gfx_dynamic_mgpg = false;
+	pi->override_dynamic_mgpg = false;
+	pi->enable_auto_thermal_throttling = true;
+	pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
+	pi->uvd_dpm = true; /* ??? */
+
+	ret = trinity_parse_sys_info_table(rdev);
+	if (ret)
+		return ret;
+
+	trinity_construct_boot_state(rdev);
+
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
+	ret = r600_parse_extended_power_table(rdev);
+	if (ret)
+		return ret;
+
+	ret = trinity_parse_power_table(rdev);
+	if (ret)
+		return ret;
+
+	pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
+	pi->enable_dpm = true;
+
+	return 0;
+}
+
+void trinity_dpm_print_power_state(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
+{
+	int i;
+	struct trinity_ps *ps = trinity_get_ps(rps);
+
+	r600_dpm_print_class_info(rps->class, rps->class2);
+	r600_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->num_levels; i++) {
+		struct trinity_pl *pl = &ps->levels[i];
+		printk("\t\tpower level %d    sclk: %u vddc: %u\n",
+		       i, pl->sclk,
+		       trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
+	}
+	r600_dpm_print_ps_status(rdev, rps);
+}
+
+void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+							 struct seq_file *m)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	struct trinity_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >>
+		CURRENT_STATE_SHIFT;
+
+	if (current_index >= ps->num_levels) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		pl = &ps->levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+			   current_index, pl->sclk,
+			   trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
+	}
+}
+
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
+	struct trinity_ps *ps = trinity_get_ps(rps);
+	struct trinity_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >>
+		CURRENT_STATE_SHIFT;
+
+	if (current_index >= ps->num_levels) {
+		return 0;
+	} else {
+		pl = &ps->levels[current_index];
+		return pl->sclk;
+	}
+}
+
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
+void trinity_dpm_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	trinity_cleanup_asic(rdev); /* ??? */
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		kfree(rdev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(rdev->pm.dpm.ps);
+	kfree(rdev->pm.dpm.priv);
+	r600_free_extended_power_table(rdev);
+}
+
+u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct trinity_ps *requested_state = trinity_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->levels[0].sclk;
+	else
+		return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
new file mode 100644
index 0000000..c261657
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TRINITY_DPM_H__
+#define __TRINITY_DPM_H__
+
+#include "sumo_dpm.h"
+
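+/* stride between the per-state SMU sclk DPM register blocks (0x20) */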
+#define TRINITY_SIZEOF_DPM_STATE_TABLE (SMU_SCLK_DPM_STATE_1_CNTL_0 - SMU_SCLK_DPM_STATE_0_CNTL_0)
+
+struct trinity_pl {
+	u32 sclk;
+	u8 vddc_index;
+	u8 ds_divider_index;
+	u8 ss_divider_index;
+	u8 allow_gnb_slow;
+	u8 force_nbp_state;
+	u8 display_wm;
+	u8 vce_wm;
+};
+
+#define TRINITY_POWERSTATE_FLAGS_NBPS_FORCEHIGH  (1 << 0)
+#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1 << 1)
+#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOLOW  (1 << 2)
+
+#define TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE    (1 << 0)
+
+struct trinity_ps {
+	u32 num_levels;
+	struct trinity_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+
+	u32 nbps_flags;
+	u32 bapm_flags;
+
+	u8 Dpm0PgNbPsLo;
+	u8 Dpm0PgNbPsHi;
+	u8 DpmXNbPsLo;
+	u8 DpmXNbPsHi;
+
+	u32 vclk_low_divider;
+	u32 vclk_high_divider;
+	u32 dclk_low_divider;
+	u32 dclk_high_divider;
+};
+
+#define TRINITY_NUM_NBPSTATES   4
+
+struct trinity_uvd_clock_table_entry {
+	u32 vclk;
+	u32 dclk;
+	u8 vclk_did;
+	u8 dclk_did;
+	u8 rsv[2];
+};
+
+struct trinity_sys_info {
+	u32 bootup_uma_clk;
+	u32 bootup_sclk;
+	u32 min_sclk;
+	u32 dentist_vco_freq;
+	u32 nb_dpm_enable;
+	u32 nbp_mclk[TRINITY_NUM_NBPSTATES];
+	u32 nbp_nclk[TRINITY_NUM_NBPSTATES];
+	u16 nbp_voltage_index[TRINITY_NUM_NBPSTATES];
+	u16 bootup_nb_voltage_index;
+	u8 htc_tmp_lmt;
+	u8 htc_hyst_lmt;
+	struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+	struct sumo_vid_mapping_table vid_mapping_table;
+	u32 uma_channel_number;
+	struct trinity_uvd_clock_table_entry uvd_clock_table_entries[4];
+};
+
+struct trinity_power_info {
+	u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+	u32 dpm_interval;
+	u32 thermal_auto_throttling;
+	struct trinity_sys_info sys_info;
+	struct trinity_pl boot_pl;
+	u32 min_sclk_did;
+	bool enable_nbps_policy;
+	bool voltage_drop_in_dce;
+	bool override_dynamic_mgpg;
+	bool enable_gfx_clock_gating;
+	bool enable_gfx_power_gating;
+	bool enable_mg_clock_gating;
+	bool enable_gfx_dynamic_mgpg;
+	bool enable_auto_thermal_throttling;
+	bool enable_dpm;
+	bool enable_sclk_ds;
+	bool enable_bapm;
+	bool uvd_dpm;
+	struct radeon_ps current_rps;
+	struct trinity_ps current_ps;
+	struct radeon_ps requested_rps;
+	struct trinity_ps requested_ps;
+};
+
+#define TRINITY_AT_DFLT            30
+
+/* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
+int trinity_dpm_config(struct radeon_device *rdev, bool enable);
+int trinity_uvd_dpm_config(struct radeon_device *rdev);
+int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
+int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n);
+int trinity_dpm_no_forced_level(struct radeon_device *rdev);
+int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
+					  bool enable);
+int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev);
+void trinity_acquire_mutex(struct radeon_device *rdev);
+void trinity_release_mutex(struct radeon_device *rdev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
new file mode 100644
index 0000000..0310e36
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "trinityd.h"
+#include "trinity_dpm.h"
+#include "ppsmc.h"
+
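+/*
+ * Simple mailbox handshake with the SMC firmware: write the message id
+ * to SMC_MESSAGE_0 and poll SMC_RESP_0 for the result.  A response of
+ * 1 means success, 0xFF a failed message and 0xFE an unknown message.
+ * Note that a poll timeout leaves the response at 0, which falls
+ * through below and is reported as success.
+ */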
+static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
+{
+	int i;
+	u32 v = 0;
+
+	WREG32(SMC_MESSAGE_0, id);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SMC_RESP_0) != 0)
+			break;
+		udelay(1);
+	}
+	v = RREG32(SMC_RESP_0);
+
+	if (v != 1) {
+		if (v == 0xFF) {
+			DRM_ERROR("SMC failed to handle the message!\n");
+			return -EINVAL;
+		} else if (v == 0xFE) {
+			DRM_ERROR("Unknown SMC message!\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
+int trinity_dpm_config(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32_SMC(SMU_SCRATCH0, 1);
+	else
+		WREG32_SMC(SMU_SCRATCH0, 0);
+
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Config);
+}
+
+int trinity_dpm_force_state(struct radeon_device *rdev, u32 n)
+{
+	WREG32_SMC(SMU_SCRATCH0, n);
+
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_ForceState);
+}
+
+int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n)
+{
+	WREG32_SMC(SMU_SCRATCH0, n);
+
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_N_LevelsDisabled);
+}
+
+int trinity_uvd_dpm_config(struct radeon_device *rdev)
+{
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_UVD_DPM_Config);
+}
+
+int trinity_dpm_no_forced_level(struct radeon_device *rdev)
+{
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+}
+
+int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
+					  bool enable)
+{
+	if (enable)
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_AllowVoltageAdjustment);
+	else
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_RemoveVoltageAdjustment);
+}
+
+int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev)
+{
+	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_PG_SIMD_Config);
+}
+
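+/*
+ * Request exclusive access to the SMC by writing 1 to SMC_INT_REQ and
+ * spinning until the firmware acknowledges it in the low 16 bits.
+ * Note there is no error path if the handshake never completes.
+ */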
+void trinity_acquire_mutex(struct radeon_device *rdev)
+{
+	int i;
+
+	WREG32(SMC_INT_REQ, 1);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if ((RREG32(SMC_INT_REQ) & 0xffff) == 1)
+			break;
+		udelay(1);
+	}
+}
+
+void trinity_release_mutex(struct radeon_device *rdev)
+{
+	WREG32(SMC_INT_REQ, 0);
+}
diff --git a/drivers/gpu/drm/radeon/trinityd.h b/drivers/gpu/drm/radeon/trinityd.h
new file mode 100644
index 0000000..fd32e27
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinityd.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef _TRINITYD_H_
+#define _TRINITYD_H_
+
+/* pm registers */
+
+/* cg */
+#define CG_CGTT_LOCAL_0                                 0x0
+#define CG_CGTT_LOCAL_1                                 0x1
+
+/* smc */
+#define SMU_SCLK_DPM_STATE_0_CNTL_0                     0x1f000
+#       define STATE_VALID(x)                           ((x) << 0)
+#       define STATE_VALID_MASK                         (0xff << 0)
+#       define STATE_VALID_SHIFT                        0
+#       define CLK_DIVIDER(x)                           ((x) << 8)
+#       define CLK_DIVIDER_MASK                         (0xff << 8)
+#       define CLK_DIVIDER_SHIFT                        8
+#       define VID(x)                                   ((x) << 16)
+#       define VID_MASK                                 (0xff << 16)
+#       define VID_SHIFT                                16
+#       define LVRT(x)                                  ((x) << 24)
+#       define LVRT_MASK                                (0xff << 24)
+#       define LVRT_SHIFT                               24
+#define SMU_SCLK_DPM_STATE_0_CNTL_1                     0x1f004
+#       define DS_DIV(x)                                ((x) << 0)
+#       define DS_DIV_MASK                              (0xff << 0)
+#       define DS_DIV_SHIFT                             0
+#       define DS_SH_DIV(x)                             ((x) << 8)
+#       define DS_SH_DIV_MASK                           (0xff << 8)
+#       define DS_SH_DIV_SHIFT                          8
+#       define DISPLAY_WM(x)                            ((x) << 16)
+#       define DISPLAY_WM_MASK                          (0xff << 16)
+#       define DISPLAY_WM_SHIFT                         16
+#       define VCE_WM(x)                                ((x) << 24)
+#       define VCE_WM_MASK                              (0xff << 24)
+#       define VCE_WM_SHIFT                             24
+
+#define SMU_SCLK_DPM_STATE_0_CNTL_3                     0x1f00c
+#       define GNB_SLOW(x)                              ((x) << 0)
+#       define GNB_SLOW_MASK                            (0xff << 0)
+#       define GNB_SLOW_SHIFT                           0
+#       define FORCE_NBPS1(x)                           ((x) << 8)
+#       define FORCE_NBPS1_MASK                         (0xff << 8)
+#       define FORCE_NBPS1_SHIFT                        8
+#define SMU_SCLK_DPM_STATE_0_AT                         0x1f010
+#       define AT(x)                                    ((x) << 0)
+#       define AT_MASK                                  (0xff << 0)
+#       define AT_SHIFT                                 0
+
+#define SMU_SCLK_DPM_STATE_0_PG_CNTL                    0x1f014
+#       define PD_SCLK_DIVIDER(x)                       ((x) << 16)
+#       define PD_SCLK_DIVIDER_MASK                     (0xff << 16)
+#       define PD_SCLK_DIVIDER_SHIFT                    16
+
+#define SMU_SCLK_DPM_STATE_1_CNTL_0                     0x1f020
+
+#define SMU_SCLK_DPM_CNTL                               0x1f100
+#       define SCLK_DPM_EN(x)                           ((x) << 0)
+#       define SCLK_DPM_EN_MASK                         (0xff << 0)
+#       define SCLK_DPM_EN_SHIFT                        0
+#       define SCLK_DPM_BOOT_STATE(x)                   ((x) << 16)
+#       define SCLK_DPM_BOOT_STATE_MASK                 (0xff << 16)
+#       define SCLK_DPM_BOOT_STATE_SHIFT                16
+#       define VOLTAGE_CHG_EN(x)                        ((x) << 24)
+#       define VOLTAGE_CHG_EN_MASK                      (0xff << 24)
+#       define VOLTAGE_CHG_EN_SHIFT                     24
+
+#define SMU_SCLK_DPM_TT_CNTL                            0x1f108
+#       define SCLK_TT_EN(x)                            ((x) << 0)
+#       define SCLK_TT_EN_MASK                          (0xff << 0)
+#       define SCLK_TT_EN_SHIFT                         0
+#define SMU_SCLK_DPM_TTT                                0x1f10c
+#       define LT(x)                                    ((x) << 0)
+#       define LT_MASK                                  (0xffff << 0)
+#       define LT_SHIFT                                 0
+#       define HT(x)                                    ((x) << 16)
+#       define HT_MASK                                  (0xffff << 16)
+#       define HT_SHIFT                                 16
+
+#define SMU_UVD_DPM_STATES                              0x1f1a0
+#define SMU_UVD_DPM_CNTL                                0x1f1a4
+
+#define SMU_S_PG_CNTL                                   0x1f118
+#       define DS_PG_EN(x)                              ((x) << 16)
+#       define DS_PG_EN_MASK                            (0xff << 16)
+#       define DS_PG_EN_SHIFT                           16
+
+#define GFX_POWER_GATING_CNTL                           0x1f38c
+#       define PDS_DIV(x)                               ((x) << 0)
+#       define PDS_DIV_MASK                             (0xff << 0)
+#       define PDS_DIV_SHIFT                            0
+#       define SSSD(x)                                  ((x) << 8)
+#       define SSSD_MASK                                (0xff << 8)
+#       define SSSD_SHIFT                               8
+
+#define PM_CONFIG                                       0x1f428
+#       define SVI_Mode                                 (1 << 29)
+
+#define PM_I_CNTL_1                                     0x1f464
+#       define SCLK_DPM(x)                              ((x) << 0)
+#       define SCLK_DPM_MASK                            (0xff << 0)
+#       define SCLK_DPM_SHIFT                           0
+#       define DS_PG_CNTL(x)                            ((x) << 16)
+#       define DS_PG_CNTL_MASK                          (0xff << 16)
+#       define DS_PG_CNTL_SHIFT                         16
+#define PM_TP                                           0x1f468
+
+#define NB_PSTATE_CONFIG                                0x1f5f8
+#       define Dpm0PgNbPsLo(x)                          ((x) << 0)
+#       define Dpm0PgNbPsLo_MASK                        (3 << 0)
+#       define Dpm0PgNbPsLo_SHIFT                       0
+#       define Dpm0PgNbPsHi(x)                          ((x) << 2)
+#       define Dpm0PgNbPsHi_MASK                        (3 << 2)
+#       define Dpm0PgNbPsHi_SHIFT                       2
+#       define DpmXNbPsLo(x)                            ((x) << 4)
+#       define DpmXNbPsLo_MASK                          (3 << 4)
+#       define DpmXNbPsLo_SHIFT                         4
+#       define DpmXNbPsHi(x)                            ((x) << 6)
+#       define DpmXNbPsHi_MASK                          (3 << 6)
+#       define DpmXNbPsHi_SHIFT                         6
+
+#define DC_CAC_VALUE                                    0x1f908
+
+#define GPU_CAC_AVRG_CNTL                               0x1f920
+#       define WINDOW_SIZE(x)                           ((x) << 0)
+#       define WINDOW_SIZE_MASK                         (0xff << 0)
+#       define WINDOW_SIZE_SHIFT                        0
+
+#define CC_SMU_MISC_FUSES                               0xe0001004
+#       define MinSClkDid(x)                   ((x) << 2)
+#       define MinSClkDid_MASK                 (0x7f << 2)
+#       define MinSClkDid_SHIFT                2
+
+#define CC_SMU_TST_EFUSE1_MISC                          0xe000101c
+#       define RB_BACKEND_DISABLE(x)                    ((x) << 16)
+#       define RB_BACKEND_DISABLE_MASK                  (3 << 16)
+#       define RB_BACKEND_DISABLE_SHIFT                 16
+
+#define SMU_SCRATCH_A                                   0xe0003024
+
+#define SMU_SCRATCH0                                    0xe0003040
+
+/* mmio */
+#define SMC_INT_REQ                                     0x220
+
+#define SMC_MESSAGE_0                                   0x22c
+#define SMC_RESP_0                                      0x230
+
+#define GENERAL_PWRMGT                                  0x670
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+
+#define SCLK_PWRMGT_CNTL                                0x678
+#       define DYN_PWR_DOWN_EN                          (1 << 2)
+#       define RESET_BUSY_CNT                           (1 << 4)
+#       define RESET_SCLK_CNT                           (1 << 5)
+#       define DYN_GFX_CLK_OFF_EN                       (1 << 7)
+#       define GFX_CLK_FORCE_ON                         (1 << 8)
+#       define DYNAMIC_PM_EN                            (1 << 21)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                0x684
+#       define TARGET_STATE(x)                          ((x) << 0)
+#       define TARGET_STATE_MASK                        (0xf << 0)
+#       define TARGET_STATE_SHIFT                       0
+#       define CURRENT_STATE(x)                         ((x) << 4)
+#       define CURRENT_STATE_MASK                       (0xf << 4)
+#       define CURRENT_STATE_SHIFT                      4
+
+#define CG_GIPOTS                                       0x6d8
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+#       define CG_GIPOT_SHIFT                           16
+
+#define CG_PG_CTRL                                      0x6e0
+#       define SP(x)                                    ((x) << 0)
+#       define SP_MASK                                  (0xffff << 0)
+#       define SP_SHIFT                                 0
+#       define SU(x)                                    ((x) << 16)
+#       define SU_MASK                                  (0xffff << 16)
+#       define SU_SHIFT                                 16
+
+#define CG_MISC_REG                                     0x708
+
+#define CG_THERMAL_INT_CTRL                             0x738
+#       define DIG_THERM_INTH(x)                        ((x) << 0)
+#       define DIG_THERM_INTH_MASK                      (0xff << 0)
+#       define DIG_THERM_INTH_SHIFT                     0
+#       define DIG_THERM_INTL(x)                        ((x) << 8)
+#       define DIG_THERM_INTL_MASK                      (0xff << 8)
+#       define DIG_THERM_INTL_SHIFT                     8
+#       define THERM_INTH_MASK                          (1 << 24)
+#       define THERM_INTL_MASK                          (1 << 25)
+
+#define CG_CG_VOLTAGE_CNTL                              0x770
+#       define EN                                       (1 << 9)
+
+#define HW_REV                                          0x5564
+#       define ATI_REV_ID_MASK                          (0xf << 28)
+#       define ATI_REV_ID_SHIFT                         28
+/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
+
+#define CGTS_SM_CTRL_REG                                0x9150
+
+#define GB_ADDR_CONFIG                                  0x98f8
+
+#endif
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 0000000..0dbeb50
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+/**
+ * uvd_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	return RREG32(UVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	return RREG32(UVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+}
+
+/**
+ * uvd_v1_0_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
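+	/*
+	 * The first DATA0/DATA1/CMD triple writes the sequence number to
+	 * the fence address (command 0); the second triple below emits
+	 * the trap (command 2).
+	 */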
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v1_0_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v1_0_resume(struct radeon_device *rdev)
+{
+	uint64_t addr;
+	uint32_t size;
+	int r;
+
+	r = radeon_uvd_resume(rdev);
+	if (r)
+		return r;
+
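+	/*
+	 * The VCPU sees three cache windows, all sized in 8-byte units:
+	 * window 0 covers the firmware image, window 1 the heap and
+	 * window 2 the stack plus per-session state.  The "+ 16" below
+	 * appears to skip a 128-byte header at the start of the image.
+	 */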
+	/* program the VCPU memory controller bits 0-27 */
+	addr = (rdev->uvd.gpu_addr >> 3) + 16;
+	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+	/* bits 28-31 */
+	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+	/* bits 32-39 */
+	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+	WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
+
+	return 0;
+}
+
+/**
+ * uvd_v1_0_init - start and test UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+int uvd_v1_0_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	uint32_t tmp;
+	int r;
+
+	/* raise clocks while booting up the VCPU */
+	if (rdev->family < CHIP_RV740)
+		radeon_set_uvd_clocks(rdev, 10000, 10000);
+	else
+		radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+	r = uvd_v1_0_start(rdev);
+	if (r)
+		goto done;
+
+	ring->ready = true;
+	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		goto done;
+	}
+
+	r = radeon_ring_lock(rdev, ring, 10);
+	if (r) {
+		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+		goto done;
+	}
+
+	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	/* Clear timeout status bits */
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+	radeon_ring_write(ring, 0x8);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+	radeon_ring_write(ring, 3);
+
+	radeon_ring_unlock_commit(rdev, ring, false);
+
+done:
+	/* lower clocks again */
+	radeon_set_uvd_clocks(rdev, 0, 0);
+
+	if (!r) {
+		switch (rdev->family) {
+		case CHIP_RV610:
+		case CHIP_RV630:
+		case CHIP_RV620:
+			/* 64-byte granularity workaround */
+			WREG32(MC_CONFIG, 0);
+			WREG32(MC_CONFIG, 1 << 4);
+			WREG32(RS_DQ_RD_RET_CONF, 0x3f);
+			WREG32(MC_CONFIG, 0x1f);
+
+			/* fall through */
+		case CHIP_RV670:
+		case CHIP_RV635:
+
+			/* write clean workaround */
+			WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
+			break;
+
+		default:
+			/* TODO: Do we need more? */
+			break;
+		}
+
+		DRM_INFO("UVD initialized successfully.\n");
+	}
+
+	return r;
+}
+
+/**
+ * uvd_v1_0_fini - stop the hardware block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the UVD block, mark ring as not ready any more
+ */
+void uvd_v1_0_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+	uvd_v1_0_stop(rdev);
+	ring->ready = false;
+}
+
+/**
+ * uvd_v1_0_start - start UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the UVD block
+ */
+int uvd_v1_0_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	uint32_t rb_bufsz;
+	int i, j, r;
+
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
+
+	/* disable clock gating */
+	WREG32(UVD_CGC_GATE, 0);
+
+	/* disable interrupt */
+	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+	mdelay(5);
+
+	/* take UVD block out of reset */
+	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+	mdelay(5);
+
+	/* initialize UVD memory controller */
+	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+			     (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+	WREG32(UVD_MPC_SET_MUXA1, 0x0);
+	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+	WREG32(UVD_MPC_SET_MUXB1, 0x0);
+	WREG32(UVD_MPC_SET_ALU, 0);
+	WREG32(UVD_MPC_SET_MUX, 0x88);
+
+	/* take all subblocks out of reset, except VCPU */
+	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+	mdelay(5);
+
+	/* enable VCPU clock */
+	WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+	/* enable UMC */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+	/* boot up the VCPU */
+	WREG32(UVD_SOFT_RESET, 0);
+	mdelay(10);
+
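+	/*
+	 * Wait for the VCPU to report ready (UVD_STATUS bit 1): up to 10
+	 * attempts, each polling for roughly a second (100 x 10ms)
+	 * before pulsing VCPU_SOFT_RESET and trying again.
+	 */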
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(UVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+		mdelay(10);
+		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+		mdelay(10);
+		r = -1;
+	}
+
+	if (r) {
+		DRM_ERROR("UVD not responding, giving up!!!\n");
+		return r;
+	}
+
+	/* enable interrupt */
+	WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+	/* force RBC into idle state */
+	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Set the write pointer delay */
+	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+	/* program the 4GB memory segment for rptr and ring buffer */
+	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+				   (0x7 << 16) | (0x1 << 31));
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+	ring->wptr = RREG32(UVD_RBC_RB_RPTR);
+	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+	/* set the ring address */
+	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+	/* Set ring buffer size */
+	rb_bufsz = order_base_2(ring->ring_size);
+	rb_bufsz = (0x1 << 8) | rb_bufsz;
+	WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+	return 0;
+}
+
+/**
+ * uvd_v1_0_stop - stop UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * stop the UVD block
+ */
+void uvd_v1_0_stop(struct radeon_device *rdev)
+{
+	/* force RBC into idle state */
+	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
+	/* put VCPU into reset */
+	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+	mdelay(5);
+
+	/* disable VCPU clock */
+	WREG32(UVD_VCPU_CNTL, 0x0);
+
+	/* Unstall UMC and register bus */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+}
+
+/**
+ * uvd_v1_0_ring_test - register write test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+			  ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring, false);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(UVD_CONTEXT_ID);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/**
+ * uvd_v1_0_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait)
+{
+	/* disable semaphores for UVD V1 hardware */
+	return false;
+}
+
+/**
+ * uvd_v1_0_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v1_0_ib_test - test ib execution
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_fence *fence = NULL;
+	int r;
+
+	if (rdev->family < CHIP_RV740)
+		r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+	else
+		r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (r) {
+		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+	if (r) {
+		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto error;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto error;
+	}
+	r = 0;
+	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+error:
+	radeon_fence_unref(&fence);
+	radeon_set_uvd_clocks(rdev, 0, 0);
+	return r;
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 0000000..9071e65
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * uvd_v2_2_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 2);
+}
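+
+/*
+ * Two command groups are emitted above: the first stores the sequence
+ * number (latched through UVD_CONTEXT_ID) at the fence address via the
+ * GPCOM DATA0/DATA1/CMD registers; the second, with CMD value 2, is the
+ * trap that raises the interrupt once the fence write has landed.
+ */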
+
+/**
+ * uvd_v2_2_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+	radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+	return true;
+}
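+
+/*
+ * Sketch (not part of this patch): the semaphore address is 8-byte
+ * aligned, so only bits 3..42 matter. ADDR_LOW carries bits 3..22 and
+ * ADDR_HIGH bits 23..42; a hypothetical decode of the two fields:
+ */
+static inline uint64_t uvd_v2_2_sema_addr(uint32_t low, uint32_t high)
+{
+	/* both fields are 20 bits wide, as masked in the emit above */
+	return ((uint64_t)(high & 0x000FFFFF) << 23) |
+	       ((uint64_t)(low  & 0x000FFFFF) << 3);
+}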
+
+/**
+ * uvd_v2_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v2_2_resume(struct radeon_device *rdev)
+{
+	uint64_t addr;
+	uint32_t chip_id, size;
+	int r;
+
+	/* RV770 uses V1.0 MC */
+	if (rdev->family == CHIP_RV770)
+		return uvd_v1_0_resume(rdev);
+
+	r = radeon_uvd_resume(rdev);
+	if (r)
+		return r;
+
+	/* program the VCPU memory controller bits 0-27 */
+	addr = rdev->uvd.gpu_addr >> 3;
+	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+	/* bits 28-31 */
+	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+	/* bits 32-39 */
+	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+	/* tell firmware which hardware it is running on */
+	switch (rdev->family) {
+	default:
+		return -EINVAL;
+	case CHIP_RV710:
+		chip_id = 0x01000005;
+		break;
+	case CHIP_RV730:
+		chip_id = 0x01000006;
+		break;
+	case CHIP_RV740:
+		chip_id = 0x01000007;
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_id = 0x01000008;
+		break;
+	case CHIP_JUNIPER:
+		chip_id = 0x01000009;
+		break;
+	case CHIP_REDWOOD:
+		chip_id = 0x0100000a;
+		break;
+	case CHIP_CEDAR:
+		chip_id = 0x0100000b;
+		break;
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		chip_id = 0x0100000c;
+		break;
+	case CHIP_PALM:
+		chip_id = 0x0100000e;
+		break;
+	case CHIP_CAYMAN:
+		chip_id = 0x0100000f;
+		break;
+	case CHIP_BARTS:
+		chip_id = 0x01000010;
+		break;
+	case CHIP_TURKS:
+		chip_id = 0x01000011;
+		break;
+	case CHIP_CAICOS:
+		chip_id = 0x01000012;
+		break;
+	case CHIP_TAHITI:
+		chip_id = 0x01000014;
+		break;
+	case CHIP_VERDE:
+		chip_id = 0x01000015;
+		break;
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+		chip_id = 0x01000016;
+		break;
+	case CHIP_ARUBA:
+		chip_id = 0x01000017;
+		break;
+	}
+	WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+	return 0;
+}
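+
+/*
+ * The resulting VCPU view, as programmed above (addresses and sizes in
+ * 8-byte units):
+ *
+ *   CACHE0: firmware image (fw size + 4 bytes, page aligned)
+ *   CACHE1: decoder heap (RADEON_UVD_HEAP_SIZE)
+ *   CACHE2: stack plus one session block per handle
+ *
+ * UVD_LMI_ADDR_EXT and UVD_LMI_EXT40_ADDR then supply bits 28-31 and
+ * 32-39 of the base address, completing the 40-bit view.
+ */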
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 0000000..d722db2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+/**
+ * uvd_v3_1_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+	return true;
+}
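+
+/*
+ * Usage sketch (core driver side, simplified): to make one ring wait for
+ * another, the sync code allocates a radeon_semaphore, emits a signal on
+ * the producer ring and a wait on the consumer ring through these
+ * per-ring hooks, and falls back to a plain fence wait on rings whose
+ * hook returns false (as UVD v1.0's does).
+ */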
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 0000000..91613b8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/**
+ * uvd_v4_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v4_2_resume(struct radeon_device *rdev)
+{
+	uint64_t addr;
+	uint32_t size;
+
+	/* program the VCPU memory controller bits 0-27 */
+
+	/* skip over the header of the new firmware format */
+	if (rdev->uvd.fw_header_present)
+		addr = (rdev->uvd.gpu_addr + 0x200) >> 3;
+	else
+		addr = rdev->uvd.gpu_addr >> 3;
+
+	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+	/* bits 28-31 */
+	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+	/* bits 32-39 */
+	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+	if (rdev->uvd.fw_header_present)
+		WREG32(UVD_GP_SCRATCH4, rdev->uvd.max_handles);
+
+	return 0;
+}
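+
+/*
+ * Note on the header skip above: firmware images in the newer container
+ * format carry a 0x200 byte header in front of the VCPU image (64 of the
+ * 8-byte units used here), so CACHE_OFFSET0 starts past it, and the
+ * supported handle count is handed to such firmware via UVD_GP_SCRATCH4.
+ */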
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
new file mode 100644
index 0000000..f541a4b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+#define VCE_V1_0_FW_SIZE	(256 * 1024)
+#define VCE_V1_0_STACK_SIZE	(64 * 1024)
+#define VCE_V1_0_DATA_SIZE	(7808 * (RADEON_MAX_VCE_HANDLES + 1))
+
+struct vce_v1_0_fw_signature {
+	int32_t off;
+	uint32_t len;
+	int32_t num;
+	struct {
+		uint32_t chip_id;
+		uint32_t keyselect;
+		uint32_t nonce[4];
+		uint32_t sigval[4];
+	} val[8];
+};
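+
+/*
+ * This mirrors the on-disk header of the signed VCE firmware: the fields
+ * are little-endian (hence the le32_to_cpu() below), with up to eight
+ * per-chip signature entries selected by chip_id.
+ */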
+
+/**
+ * vce_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_RPTR);
+	else
+		return RREG32(VCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_WPTR);
+	else
+		return RREG32(VCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		WREG32(VCE_RB_WPTR, ring->wptr);
+	else
+		WREG32(VCE_RB_WPTR2, ring->wptr);
+}
+
+void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+	u32 tmp;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
+		tmp = RREG32(VCE_CLOCK_GATING_A);
+		tmp |= CGC_DYN_CLOCK_MODE;
+		WREG32(VCE_CLOCK_GATING_A, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp &= ~0x1ff000;
+		tmp |= 0xff800000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3ff;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+	} else {
+		tmp = RREG32(VCE_CLOCK_GATING_A);
+		tmp &= ~CGC_DYN_CLOCK_MODE;
+		WREG32(VCE_CLOCK_GATING_A, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0x1ff000;
+		tmp &= ~0xff800000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3ff;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v1_0_init_cg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_A);
+	tmp |= CGC_DYN_CLOCK_MODE;
+	WREG32(VCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp |= 0x1e;
+	tmp &= ~0xe100e1;
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+
+	tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~0xff9ff000;
+	WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3ff;
+	WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+}
+
+int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
+{
+	struct vce_v1_0_fw_signature *sign = (void *)rdev->vce_fw->data;
+	uint32_t chip_id;
+	int i;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		chip_id = 0x01000014;
+		break;
+	case CHIP_VERDE:
+		chip_id = 0x01000015;
+		break;
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+		chip_id = 0x01000016;
+		break;
+	case CHIP_ARUBA:
+		chip_id = 0x01000017;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+			break;
+	}
+
+	if (i == le32_to_cpu(sign->num))
+		return -EINVAL;
+
+	data += (256 - 64) / 4;
+	data[0] = sign->val[i].nonce[0];
+	data[1] = sign->val[i].nonce[1];
+	data[2] = sign->val[i].nonce[2];
+	data[3] = sign->val[i].nonce[3];
+	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
+
+	memset(&data[5], 0, 44);
+	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
+
+	data += (le32_to_cpu(sign->len) + 64) / 4;
+	data[0] = sign->val[i].sigval[0];
+	data[1] = sign->val[i].sigval[1];
+	data[2] = sign->val[i].sigval[2];
+	data[3] = sign->val[i].sigval[3];
+
+	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+	return 0;
+}
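+
+/*
+ * Layout of the image assembled above (byte offsets into the BO):
+ *
+ *   192       nonce[0..3] of the matching signature entry
+ *   208       body length + 64
+ *   212-255   zero padding
+ *   256       firmware body (len bytes, copied from after the header)
+ *   256+len   sigval[0..3]
+ */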
+
+unsigned vce_v1_0_bo_size(struct radeon_device *rdev)
+{
+	WARN_ON(VCE_V1_0_FW_SIZE < rdev->vce_fw->size);
+	return VCE_V1_0_FW_SIZE + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE;
+}
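+
+/*
+ * Worked size, assuming RADEON_MAX_VCE_HANDLES is 16 as in radeon.h:
+ * the data segment is 7808 * 17 = 132736 bytes, for a total BO of
+ * 262144 + 65536 + 132736 = 460416 bytes, just under 450 KiB.
+ */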
+
+int vce_v1_0_resume(struct radeon_device *rdev)
+{
+	uint64_t addr = rdev->vce.gpu_addr;
+	uint32_t size;
+	int i;
+
+	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(VCE_CLOCK_GATING_B, 0);
+
+	WREG32_P(VCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
+
+	WREG32(VCE_LMI_CTRL, 0x00398000);
+	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(VCE_LMI_SWAP_CNTL, 0);
+	WREG32(VCE_LMI_SWAP_CNTL1, 0);
+	WREG32(VCE_LMI_VM_CTRL, 0);
+
+	WREG32(VCE_VCPU_SCRATCH7, RADEON_MAX_VCE_HANDLES);
+
+	addr += 256;
+	size = VCE_V1_0_FW_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = VCE_V1_0_STACK_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = VCE_V1_0_DATA_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
+
+	WREG32(VCE_LMI_FW_START_KEYSEL, rdev->vce.keyselect);
+
+	for (i = 0; i < 10; ++i) {
+		mdelay(10);
+		if (RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_DONE)
+			break;
+	}
+
+	if (i == 10)
+		return -ETIMEDOUT;
+
+	if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_PASS))
+		return -EINVAL;
+
+	for (i = 0; i < 10; ++i) {
+		mdelay(10);
+		if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_BUSY))
+			break;
+	}
+
+	if (i == 10)
+		return -ETIMEDOUT;
+
+	vce_v1_0_init_cg(rdev);
+
+	return 0;
+}
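+
+/*
+ * Boot handshake, as polled above: the VCPU must first report DONE, then
+ * PASS (presumably image fetch and signature check), and finally clear
+ * BUSY; each stage is given up to 100ms in 10ms steps.
+ */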
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up and start the VCE block
+ */
+int vce_v1_0_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int i, j, r;
+
+	/* set BUSY flag */
+	WREG32_P(VCE_STATUS, 1, ~1);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	WREG32(VCE_RB_RPTR, ring->wptr);
+	WREG32(VCE_RB_WPTR, ring->wptr);
+	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	WREG32(VCE_RB_RPTR2, ring->wptr);
+	WREG32(VCE_RB_WPTR2, ring->wptr);
+	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);
+
+	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);
+
+	WREG32_P(VCE_SOFT_RESET,
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	mdelay(100);
+
+	WREG32_P(VCE_SOFT_RESET, 0, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(VCE_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		r = -1;
+	}
+
+	/* clear BUSY flag */
+	WREG32_P(VCE_STATUS, 0, ~1);
+
+	if (r) {
+		DRM_ERROR("VCE not responding, giving up!!!\n");
+		return r;
+	}
+
+	return 0;
+}
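+
+/*
+ * Recovery path above: if VCE_STATUS bit 1 never appears, the ECPU alone
+ * is pulsed through soft reset and the poll retried, up to ten attempts,
+ * before the start is abandoned.
+ */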
+
+int vce_v1_0_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	r = vce_v1_0_start(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	DRM_INFO("VCE initialized successfully.\n");
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
new file mode 100644
index 0000000..b0a43b6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+#define VCE_V2_0_FW_SIZE	(256 * 1024)
+#define VCE_V2_0_STACK_SIZE	(64 * 1024)
+#define VCE_V2_0_DATA_SIZE	(23552 * RADEON_MAX_VCE_HANDLES)
+
+static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 tmp;
+
+	if (gated) {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+	} else {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe7;
+		tmp &= ~0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0x1fe000;
+		tmp &= ~0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v2_0_set_dyn_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 orig, tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp &= ~0x00060006;
+	if (gated) {
+		tmp |= 0xe10000;
+	} else {
+		tmp |= 0xe1;
+		tmp &= ~0xe10000;
+	}
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~0x1fe000;
+	tmp &= ~0xff000000;
+	if (tmp != orig)
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3fc;
+	if (tmp != orig)
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+	if (gated)
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+}
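+
+/*
+ * Unlike the sw_cg path, the dynamic path only rewrites the two UENC
+ * gating registers when the masked value actually changed, sparing
+ * redundant register writes on repeated calls.
+ */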
+
+static void vce_v2_0_disable_cg(struct radeon_device *rdev)
+{
+	WREG32(VCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+/*
+ * The local variable sw_cg is kept for debugging purposes, in case we
+ * run into problems with dynamic clock gating. Don't remove it.
+ */
+void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+	bool sw_cg = false;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, true);
+		else
+			vce_v2_0_set_dyn_cg(rdev, true);
+	} else {
+		vce_v2_0_disable_cg(rdev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, false);
+		else
+			vce_v2_0_set_dyn_cg(rdev, false);
+	}
+}
+
+static void vce_v2_0_init_cg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_A);
+	tmp &= ~(CGC_CLK_GATE_DLY_TIMER_MASK | CGC_CLK_GATER_OFF_DLY_TIMER_MASK);
+	tmp |= (CGC_CLK_GATE_DLY_TIMER(0) | CGC_CLK_GATER_OFF_DLY_TIMER(4));
+	tmp |= CGC_UENC_WAIT_AWAKE;
+	WREG32(VCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK);
+	tmp |= (CLOCK_ON_DELAY(0) | CLOCK_OFF_DELAY(4));
+	WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp |= 0x10;
+	tmp &= ~0x100000;
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+}
+
+unsigned vce_v2_0_bo_size(struct radeon_device *rdev)
+{
+	WARN_ON(rdev->vce_fw->size > VCE_V2_0_FW_SIZE);
+	return VCE_V2_0_FW_SIZE + VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE;
+}
+
+int vce_v2_0_resume(struct radeon_device *rdev)
+{
+	uint64_t addr = rdev->vce.gpu_addr;
+	uint32_t size;
+
+	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(VCE_CLOCK_GATING_B, 0xf7);
+
+	WREG32(VCE_LMI_CTRL, 0x00398000);
+	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(VCE_LMI_SWAP_CNTL, 0);
+	WREG32(VCE_LMI_SWAP_CNTL1, 0);
+	WREG32(VCE_LMI_VM_CTRL, 0);
+
+	WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+	addr &= 0xff;
+	size = VCE_V2_0_FW_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = VCE_V2_0_STACK_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = VCE_V2_0_DATA_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
+
+	WREG32_P(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN,
+		 ~VCE_SYS_INT_TRAP_INTERRUPT_EN);
+
+	vce_v2_0_init_cg(rdev);
+
+	return 0;
+}
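+
+/*
+ * Addressing scheme above: VCE_LMI_VCPU_CACHE_40BIT_BAR takes bits 8-39
+ * of the BO address, so the per-segment OFFSET registers only carry the
+ * remaining low byte plus the running offset within the BO.
+ */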